/* drivers/dca/dca-core.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
  4. */
  5. /*
  6. * This driver supports an interface for DCA clients and providers to meet.
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/notifier.h>
  10. #include <linux/device.h>
  11. #include <linux/dca.h>
  12. #include <linux/slab.h>
  13. #include <linux/module.h>
#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

/* Protects dca_domains, the per-domain provider lists and
 * dca_providers_blocked. */
static DEFINE_RAW_SPINLOCK(dca_lock);

/* List of struct dca_domain — one per PCI root complex that has
 * registered DCA providers. */
static LIST_HEAD(dca_domains);

/* Clients subscribe here to be told when providers come and go
 * (DCA_PROVIDER_ADD / DCA_PROVIDER_REMOVE). */
static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

/* Nonzero once provider registration has been disabled; set when an
 * I/OAT ver-3.0 device shows up while other domains already exist. */
static int dca_providers_blocked;
  22. static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
  23. {
  24. struct pci_dev *pdev = to_pci_dev(dev);
  25. struct pci_bus *bus = pdev->bus;
  26. while (bus->parent)
  27. bus = bus->parent;
  28. return bus;
  29. }
  30. static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
  31. {
  32. struct dca_domain *domain;
  33. domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
  34. if (!domain)
  35. return NULL;
  36. INIT_LIST_HEAD(&domain->dca_providers);
  37. domain->pci_rc = rc;
  38. return domain;
  39. }
/*
 * Unlink @domain from dca_domains and free it.
 * Caller must hold dca_lock.
 */
static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}
  45. static int dca_provider_ioat_ver_3_0(struct device *dev)
  46. {
  47. struct pci_dev *pdev = to_pci_dev(dev);
  48. return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
  49. ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
  50. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
  51. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
  52. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
  53. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
  54. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
  55. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
  56. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
  57. }
/*
 * Tear down every registered provider. Invoked when a mixed I/OAT
 * ver-3.0 topology is detected and providers must be blocked.
 *
 * Providers are detached onto a private list under dca_lock, then
 * their sysfs entries are removed after the lock is dropped, because
 * dca_sysfs_remove_provider() may sleep.
 */
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	/* Tell clients first so they stop using the providers. */
	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	/* Move all providers onto the private list while locked. */
	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* Sysfs removal may sleep — do it outside dca_lock. */
	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}
  83. static struct dca_domain *dca_find_domain(struct pci_bus *rc)
  84. {
  85. struct dca_domain *domain;
  86. list_for_each_entry(domain, &dca_domains, node)
  87. if (domain->pci_rc == rc)
  88. return domain;
  89. return NULL;
  90. }
  91. static struct dca_domain *dca_get_domain(struct device *dev)
  92. {
  93. struct pci_bus *rc;
  94. struct dca_domain *domain;
  95. rc = dca_pci_rc_from_dev(dev);
  96. domain = dca_find_domain(rc);
  97. if (!domain) {
  98. if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
  99. dca_providers_blocked = 1;
  100. }
  101. return domain;
  102. }
  103. static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
  104. {
  105. struct dca_provider *dca;
  106. struct pci_bus *rc;
  107. struct dca_domain *domain;
  108. if (dev) {
  109. rc = dca_pci_rc_from_dev(dev);
  110. domain = dca_find_domain(rc);
  111. if (!domain)
  112. return NULL;
  113. } else {
  114. if (!list_empty(&dca_domains))
  115. domain = list_first_entry(&dca_domains,
  116. struct dca_domain,
  117. node);
  118. else
  119. return NULL;
  120. }
  121. list_for_each_entry(dca, &domain->dca_providers, node)
  122. if ((!dev) || (dca->ops->dev_managed(dca, dev)))
  123. return dca;
  124. return NULL;
  125. }
/**
 * dca_add_requester - add a dca client to the list
 * @dev: the device that wants dca service
 *
 * Returns 0 on success; -EFAULT for a NULL @dev, -EEXIST if @dev is
 * already registered with a provider, -ENODEV when no domain exists or
 * no provider accepts the requester, or the error from
 * dca_sysfs_add_req().
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	/* Offer @dev to each provider in the domain; first taker wins. */
	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	/* Sysfs registration may sleep, so it runs outside dca_lock. */
	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		/*
		 * Roll back — but recheck under the lock that the same
		 * provider still manages @dev, since it could have been
		 * unregistered while the lock was dropped.
		 */
		raw_spin_lock_irqsave(&dca_lock, flags);
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
  171. /**
  172. * dca_remove_requester - remove a dca client from the list
  173. * @dev - the device that wants dca service
  174. */
  175. int dca_remove_requester(struct device *dev)
  176. {
  177. struct dca_provider *dca;
  178. int slot;
  179. unsigned long flags;
  180. if (!dev)
  181. return -EFAULT;
  182. raw_spin_lock_irqsave(&dca_lock, flags);
  183. dca = dca_find_provider_by_dev(dev);
  184. if (!dca) {
  185. raw_spin_unlock_irqrestore(&dca_lock, flags);
  186. return -ENODEV;
  187. }
  188. slot = dca->ops->remove_requester(dca, dev);
  189. raw_spin_unlock_irqrestore(&dca_lock, flags);
  190. if (slot < 0)
  191. return slot;
  192. dca_sysfs_remove_req(dca, slot);
  193. return 0;
  194. }
  195. EXPORT_SYMBOL_GPL(dca_remove_requester);
/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev: the device that wants dca service
 * @cpu: the cpuid as returned by get_cpu()
 *
 * NOTE(review): the failure path returns -ENODEV through a u8 return
 * type, so callers actually see the truncated positive value 0xed —
 * a longstanding quirk that cannot be fixed without changing the API.
 */
static u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	/* The provider computes the tag for @cpu while dca_lock pins it. */
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}
/**
 * dca3_get_tag - return the dca tag to the requester device
 * for the given cpu (new api)
 * @dev: the device that wants dca service
 * @cpu: the cpuid as returned by get_cpu()
 *
 * NOTE(review): -EFAULT is returned through a u8, so a NULL @dev yields
 * the truncated positive value 0xf2 — same quirk as dca_common_get_tag().
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);
  229. /**
  230. * dca_get_tag - return the dca tag for the given cpu (old api)
  231. * @cpu - the cpuid as returned by get_cpu()
  232. */
  233. u8 dca_get_tag(int cpu)
  234. {
  235. struct device *dev = NULL;
  236. return dca_common_get_tag(dev, cpu);
  237. }
  238. EXPORT_SYMBOL_GPL(dca_get_tag);
  239. /**
  240. * alloc_dca_provider - get data struct for describing a dca provider
  241. * @ops - pointer to struct of dca operation function pointers
  242. * @priv_size - size of extra mem to be added for provider's needs
  243. */
  244. struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
  245. int priv_size)
  246. {
  247. struct dca_provider *dca;
  248. int alloc_size;
  249. alloc_size = (sizeof(*dca) + priv_size);
  250. dca = kzalloc(alloc_size, GFP_KERNEL);
  251. if (!dca)
  252. return NULL;
  253. dca->ops = ops;
  254. return dca;
  255. }
  256. EXPORT_SYMBOL_GPL(alloc_dca_provider);
  257. /**
  258. * free_dca_provider - release the dca provider data struct
  259. * @ops - pointer to struct of dca operation function pointers
  260. * @priv_size - size of extra mem to be added for provider's needs
  261. */
  262. void free_dca_provider(struct dca_provider *dca)
  263. {
  264. kfree(dca);
  265. }
  266. EXPORT_SYMBOL_GPL(free_dca_provider);
/**
 * register_dca_provider - register a dca provider
 * @dca: struct created by alloc_dca_provider()
 * @dev: device providing dca services
 *
 * Returns 0 on success; -ENODEV when providers are blocked or a new
 * domain cannot be allocated, or the error from dca_sysfs_add_provider().
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain, *newdomain = NULL;

	raw_spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* Sysfs registration may sleep, so run it without dca_lock held. */
	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	raw_spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		struct pci_bus *rc;

		/*
		 * dca_get_domain() may have just flagged a mixed I/OAT
		 * ver-3.0 topology; undo this registration and tear down
		 * all existing providers.
		 */
		if (dca_providers_blocked) {
			raw_spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
			return -ENODEV;
		}

		/* Allocation must not happen under the raw spinlock. */
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		rc = dca_pci_rc_from_dev(dev);
		newdomain = dca_allocate_domain(rc);
		if (!newdomain)
			return -ENODEV;
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Recheck, we might have raced after dropping the lock */
		domain = dca_get_domain(dev);
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &dca_domains);
		}
	}
	list_add(&dca->node, &domain->dca_providers);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	/* Free the pre-allocated domain if we lost the race above. */
	kfree(newdomain);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
/**
 * unregister_dca_provider - remove a dca provider
 * @dca: struct created by alloc_dca_provider()
 * @dev: device that was providing dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	/* Tell clients first so they stop using this provider. */
	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	list_del(&dca->node);

	/* Drop the domain once its last provider is gone. */
	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	/*
	 * NOTE(review): dca_find_domain() can return NULL if @dev's root
	 * complex has no domain, which would deref NULL below — assumes
	 * callers pass the same @dev used at registration; confirm.
	 */
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* Sysfs removal may sleep — done after dropping dca_lock. */
	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);
/**
 * dca_register_notify - register a client's notifier callback
 * @nb: notifier block to be called on DCA_PROVIDER_ADD/REMOVE events
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);
/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb: notifier block previously passed to dca_register_notify()
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
/* Module init: announce the service and set up the dca sysfs class. */
static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}

/* Module exit: tear down the dca sysfs class. */
static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

/*
 * arch_initcall rather than module_init — presumably so the service is
 * up before device-level initcalls of providers/clients; confirm.
 */
arch_initcall(dca_init);
module_exit(dca_exit);