vphb.c 6.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271
  1. /*
  2. * Copyright 2014 IBM Corp.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. */
  9. #include <linux/pci.h>
  10. #include <misc/cxl.h>
  11. #include "cxl.h"
  12. static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
  13. {
  14. if (dma_mask < DMA_BIT_MASK(64)) {
  15. pr_info("%s only 64bit DMA supported on CXL", __func__);
  16. return -EIO;
  17. }
  18. *(pdev->dev.dma_mask) = dma_mask;
  19. return 0;
  20. }
  21. static int cxl_pci_probe_mode(struct pci_bus *bus)
  22. {
  23. return PCI_PROBE_NORMAL;
  24. }
  25. static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
  26. {
  27. return -ENODEV;
  28. }
  29. static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
  30. {
  31. /*
  32. * MSI should never be set but need still need to provide this call
  33. * back.
  34. */
  35. }
  36. static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
  37. {
  38. struct pci_controller *phb;
  39. struct cxl_afu *afu;
  40. struct cxl_context *ctx;
  41. phb = pci_bus_to_host(dev->bus);
  42. afu = (struct cxl_afu *)phb->private_data;
  43. set_dma_ops(&dev->dev, &dma_direct_ops);
  44. set_dma_offset(&dev->dev, PAGE_OFFSET);
  45. /*
  46. * Allocate a context to do cxl things too. If we eventually do real
  47. * DMA ops, we'll need a default context to attach them to
  48. */
  49. ctx = cxl_dev_context_init(dev);
  50. if (!ctx)
  51. return false;
  52. dev->dev.archdata.cxl_ctx = ctx;
  53. return (cxl_afu_check_and_enable(afu) == 0);
  54. }
  55. static void cxl_pci_disable_device(struct pci_dev *dev)
  56. {
  57. struct cxl_context *ctx = cxl_get_context(dev);
  58. if (ctx) {
  59. if (ctx->status == STARTED) {
  60. dev_err(&dev->dev, "Default context started\n");
  61. return;
  62. }
  63. dev->dev.archdata.cxl_ctx = NULL;
  64. cxl_release_context(ctx);
  65. }
  66. }
  67. static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
  68. unsigned long type)
  69. {
  70. return 1;
  71. }
  72. static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
  73. {
  74. /* Should we do an AFU reset here ? */
  75. }
  76. static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
  77. {
  78. return (bus << 8) + devfn;
  79. }
  80. static unsigned long cxl_pcie_cfg_addr(struct pci_controller* phb,
  81. u8 bus, u8 devfn, int offset)
  82. {
  83. int record = cxl_pcie_cfg_record(bus, devfn);
  84. return (unsigned long)phb->cfg_addr + ((unsigned long)phb->cfg_data * record) + offset;
  85. }
  86. static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
  87. int offset, int len,
  88. volatile void __iomem **ioaddr,
  89. u32 *mask, int *shift)
  90. {
  91. struct pci_controller *phb;
  92. struct cxl_afu *afu;
  93. unsigned long addr;
  94. phb = pci_bus_to_host(bus);
  95. afu = (struct cxl_afu *)phb->private_data;
  96. if (phb == NULL)
  97. return PCIBIOS_DEVICE_NOT_FOUND;
  98. if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
  99. return PCIBIOS_DEVICE_NOT_FOUND;
  100. if (offset >= (unsigned long)phb->cfg_data)
  101. return PCIBIOS_BAD_REGISTER_NUMBER;
  102. addr = cxl_pcie_cfg_addr(phb, bus->number, devfn, offset);
  103. *ioaddr = (void *)(addr & ~0x3ULL);
  104. *shift = ((addr & 0x3) * 8);
  105. switch (len) {
  106. case 1:
  107. *mask = 0xff;
  108. break;
  109. case 2:
  110. *mask = 0xffff;
  111. break;
  112. default:
  113. *mask = 0xffffffff;
  114. break;
  115. }
  116. return 0;
  117. }
  118. static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
  119. int offset, int len, u32 *val)
  120. {
  121. volatile void __iomem *ioaddr;
  122. int shift, rc;
  123. u32 mask;
  124. rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
  125. &mask, &shift);
  126. if (rc)
  127. return rc;
  128. /* Can only read 32 bits */
  129. *val = (in_le32(ioaddr) >> shift) & mask;
  130. return PCIBIOS_SUCCESSFUL;
  131. }
  132. static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
  133. int offset, int len, u32 val)
  134. {
  135. volatile void __iomem *ioaddr;
  136. u32 v, mask;
  137. int shift, rc;
  138. rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
  139. &mask, &shift);
  140. if (rc)
  141. return rc;
  142. /* Can only write 32 bits so do read-modify-write */
  143. mask <<= shift;
  144. val <<= shift;
  145. v = (in_le32(ioaddr) & ~mask) || (val & mask);
  146. out_le32(ioaddr, v);
  147. return PCIBIOS_SUCCESSFUL;
  148. }
/* Config-space accessors for devices behind the cxl virtual PHB. */
static struct pci_ops cxl_pcie_pci_ops =
{
	.read = cxl_pcie_read_config,
	.write = cxl_pcie_write_config,
};
/*
 * Controller callbacks for the cxl virtual PHB.  Note that the same
 * routine handles both disable_device and release_device: either way
 * the default cxl context is torn down.
 */
static struct pci_controller_ops cxl_pci_controller_ops =
{
	.probe_mode = cxl_pci_probe_mode,
	.enable_device_hook = cxl_pci_enable_device_hook,
	.disable_device = cxl_pci_disable_device,
	.release_device = cxl_pci_disable_device,
	.window_alignment = cxl_pci_window_alignment,
	.reset_secondary_bus = cxl_pci_reset_secondary_bus,
	.setup_msi_irqs = cxl_setup_msi_irqs,
	.teardown_msi_irqs = cxl_teardown_msi_irqs,
	.dma_set_mask = cxl_dma_set_mask,
};
  166. int cxl_pci_vphb_add(struct cxl_afu *afu)
  167. {
  168. struct pci_dev *phys_dev;
  169. struct pci_controller *phb, *phys_phb;
  170. phys_dev = to_pci_dev(afu->adapter->dev.parent);
  171. phys_phb = pci_bus_to_host(phys_dev->bus);
  172. /* Alloc and setup PHB data structure */
  173. phb = pcibios_alloc_controller(phys_phb->dn);
  174. if (!phb)
  175. return -ENODEV;
  176. /* Setup parent in sysfs */
  177. phb->parent = &phys_dev->dev;
  178. /* Setup the PHB using arch provided callback */
  179. phb->ops = &cxl_pcie_pci_ops;
  180. phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
  181. phb->cfg_data = (void *)(u64)afu->crs_len;
  182. phb->private_data = afu;
  183. phb->controller_ops = cxl_pci_controller_ops;
  184. /* Scan the bus */
  185. pcibios_scan_phb(phb);
  186. if (phb->bus == NULL)
  187. return -ENXIO;
  188. /* Claim resources. This might need some rework as well depending
  189. * whether we are doing probe-only or not, like assigning unassigned
  190. * resources etc...
  191. */
  192. pcibios_claim_one_bus(phb->bus);
  193. /* Add probed PCI devices to the device model */
  194. pci_bus_add_devices(phb->bus);
  195. afu->phb = phb;
  196. return 0;
  197. }
  198. void cxl_pci_vphb_remove(struct cxl_afu *afu)
  199. {
  200. struct pci_controller *phb;
  201. /* If there is no configuration record we won't have one of these */
  202. if (!afu || !afu->phb)
  203. return;
  204. phb = afu->phb;
  205. pci_remove_root_bus(phb->bus);
  206. }
  207. struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
  208. {
  209. struct pci_controller *phb;
  210. phb = pci_bus_to_host(dev->bus);
  211. return (struct cxl_afu *)phb->private_data;
  212. }
  213. EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
  214. unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
  215. {
  216. return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
  217. }
  218. EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);