// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Backend - Provides a Virtual PCI bus (with real devices)
 *               to the frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/mutex.h>

#include "pciback.h"

/* A PCI slot number is five bits wide, so a bus holds at most 32 slots. */
#define PCI_SLOT_MAX 32

struct vpci_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list[PCI_SLOT_MAX];
	struct mutex lock;
};
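
/* Return the first entry of a (non-empty) list. */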
static inline struct list_head *list_first(struct list_head *head)
{
	return head->next;
}

static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	/* The virtual PCI bus is always domain 0, bus 0. */
	if (domain != 0 || bus != 0)
		return NULL;

	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
		mutex_lock(&vpci_dev->lock);

		list_for_each_entry(entry,
				    &vpci_dev->dev_list[PCI_SLOT(devfn)],
				    list) {
			if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
				dev = entry->dev;
				break;
			}
		}

		mutex_unlock(&vpci_dev->lock);
	}
	return dev;
}
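
/*
 * Two devices share a virtual slot iff they are functions of the same
 * physical multi-function device: same domain, same bus, same slot.
 */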
static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
{
	if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
	    && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
		return 1;

	return 0;
}
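
/*
 * Export @dev to the frontend. Functions of a multi-function device are
 * grouped into the virtual slot that already holds their siblings; virtual
 * functions and previously unseen devices take the first free slot. Fails
 * with -ENOMEM once all PCI_SLOT_MAX slots are occupied.
 */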
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev, int devid,
				   publish_pci_dev_cb publish_cb)
{
	int err = 0, slot, func = -1;
	struct pci_dev_entry *t, *dev_entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
		err = -EFAULT;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Can't export bridges on the virtual PCI bus");
		goto out;
	}

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry) {
		err = -ENOMEM;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error adding entry to virtual PCI bus");
		goto out;
	}

	dev_entry->dev = dev;

	mutex_lock(&vpci_dev->lock);

	/*
	 * Keep multi-function devices together on the virtual PCI bus, except
	 * virtual functions.
	 */
	if (!dev->is_virtfn) {
		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
			if (list_empty(&vpci_dev->dev_list[slot]))
				continue;

			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
				       struct pci_dev_entry, list);

			if (match_slot(dev, t->dev)) {
				pr_info("vpci: %s: assign to virtual slot %d func %d\n",
					pci_name(dev), slot,
					PCI_FUNC(dev->devfn));
				list_add_tail(&dev_entry->list,
					      &vpci_dev->dev_list[slot]);
				func = PCI_FUNC(dev->devfn);
				goto unlock;
			}
		}
	}

	/* Assign to a new slot on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (list_empty(&vpci_dev->dev_list[slot])) {
			pr_info("vpci: %s: assign to virtual slot %d\n",
				pci_name(dev), slot);
			list_add_tail(&dev_entry->list,
				      &vpci_dev->dev_list[slot]);
			func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
			goto unlock;
		}
	}

	err = -ENOMEM;
	xenbus_dev_fatal(pdev->xdev, err,
			 "No more space on root virtual PCI bus");

unlock:
	mutex_unlock(&vpci_dev->lock);

	/* Publish this device. */
	if (!err)
		err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
	else
		kfree(dev_entry);

out:
	return err;
}
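
/*
 * Remove @dev from the virtual bus and hand it back to pcistub; @lock says
 * whether the device lock must be taken around pcistub_put_pci_dev().
 */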
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev, bool lock)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e;

		list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
			if (e->dev == dev) {
				list_del(&e->list);
				found_dev = e->dev;
				kfree(e);
				goto out;
			}
		}
	}

out:
	mutex_unlock(&vpci_dev->lock);

	if (found_dev) {
		if (lock)
			device_lock(&found_dev->dev);
		pcistub_put_pci_dev(found_dev);
		if (lock)
			device_unlock(&found_dev->dev);
	}
}
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev;

	vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
	if (!vpci_dev)
		return -ENOMEM;

	mutex_init(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++)
		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);

	pdev->pci_dev_data = vpci_dev;

	return 0;
}

static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_cb)
{
	/* The Virtual PCI bus has only one root */
	return publish_cb(pdev, 0, 0);
}
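
/*
 * Backend teardown: give every exported device back to pcistub and free
 * the per-slot lists.
 */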
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e, *tmp;

		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
					 list) {
			struct pci_dev *dev = e->dev;

			list_del(&e->list);
			device_lock(&dev->dev);
			pcistub_put_pci_dev(dev);
			device_unlock(&dev->dev);
			kfree(e);
		}
	}

	kfree(vpci_dev);
	pdev->pci_dev_data = NULL;
}
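
/*
 * Map a physical device to the (domain, bus, devfn) triple the frontend
 * sees. Returns 1 and fills in the triple if the device is on the virtual
 * bus, 0 otherwise.
 */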
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	int found = 0, slot;

	mutex_lock(&vpci_dev->lock);
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		list_for_each_entry(entry, &vpci_dev->dev_list[slot], list) {
			dev = entry->dev;
			if (dev && dev->bus->number == pcidev->bus->number
			    && pci_domain_nr(dev->bus) ==
			       pci_domain_nr(pcidev->bus)
			    && dev->devfn == pcidev->devfn) {
				found = 1;
				*domain = 0;
				*bus = 0;
				*devfn = PCI_DEVFN(slot,
						   PCI_FUNC(pcidev->devfn));
			}
		}
	}
	mutex_unlock(&vpci_dev->lock);
	return found;
}
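
/* Backend operations used by the generic xen-pciback code. */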
const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
	.name		= "vpci",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};