  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright IBM Corp. 2012
  4. *
  5. * Author(s):
  6. * Jan Glauber <jang@linux.vnet.ibm.com>
  7. *
  8. * The System z PCI code is a rewrite from a prototype by
  9. * the following people (Kudoz!):
  10. * Alexander Schmidt
  11. * Christoph Raisch
  12. * Hannes Hering
  13. * Hoang-Nam Nguyen
  14. * Jan-Bernd Themann
  15. * Stefan Roscher
  16. * Thomas Klein
  17. */
  18. #define KMSG_COMPONENT "zpci"
  19. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  20. #include <linux/kernel.h>
  21. #include <linux/slab.h>
  22. #include <linux/err.h>
  23. #include <linux/export.h>
  24. #include <linux/delay.h>
  25. #include <linux/irq.h>
  26. #include <linux/kernel_stat.h>
  27. #include <linux/seq_file.h>
  28. #include <linux/pci.h>
  29. #include <linux/msi.h>
  30. #include <asm/isc.h>
  31. #include <asm/airq.h>
  32. #include <asm/facility.h>
  33. #include <asm/pci_insn.h>
  34. #include <asm/pci_clp.h>
  35. #include <asm/pci_dma.h>
  36. #define DEBUG /* enable pr_debug */
  37. #define SIC_IRQ_MODE_ALL 0
  38. #define SIC_IRQ_MODE_SINGLE 1
  39. #define ZPCI_NR_DMA_SPACES 1
  40. #define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
/* list of all detected zpci devices, protected by zpci_list_lock */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

/* irq_chip for zPCI MSI interrupts; mask/unmask via generic MSI helpers */
static struct irq_chip zpci_irq_chip = {
	.name = "zPCI",
	.irq_unmask = pci_msi_unmask_irq,
	.irq_mask = pci_msi_mask_irq,
};

/* bitmap of PCI domain numbers in use, protected by zpci_domain_lock */
static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

/* adapter summary indicator vector and per-device interrupt vectors */
static struct airq_iv *zpci_aisb_iv;
static struct airq_iv *zpci_aibv[ZPCI_NR_DEVICES];

/* number of slots in the BAR->address iomap table */
#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_BAR_COUNT / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)
static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* slab cache for function measurement blocks (zpci_fmb) */
static struct kmem_cache *zdev_fmb_cache;
  61. struct zpci_dev *get_zdev_by_fid(u32 fid)
  62. {
  63. struct zpci_dev *tmp, *zdev = NULL;
  64. spin_lock(&zpci_list_lock);
  65. list_for_each_entry(tmp, &zpci_list, entry) {
  66. if (tmp->fid == fid) {
  67. zdev = tmp;
  68. break;
  69. }
  70. }
  71. spin_unlock(&zpci_list_lock);
  72. return zdev;
  73. }
/*
 * Remove all standby devices whose firmware state has moved to RESERVED.
 * Candidates are first moved to a private list under zpci_list_lock so the
 * actual removal can run without holding the lock.
 */
void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_remove_device(zdev);
}
  90. static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
  91. {
  92. return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
  93. }
/* PCI domain number of a bus: taken from the owning zpci device. */
int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);
/* /proc/bus/pci uses the same domain number as pci_domain_nr(). */
int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
/* Modify PCI: Register adapter interruptions */
static int zpci_set_airq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib fib = {0};
	u8 status;

	fib.isc = PCI_ISC;
	fib.sum = 1;		/* enable summary notifications */
	fib.noi = airq_iv_end(zdev->aibv);
	fib.aibv = (unsigned long) zdev->aibv->vector;
	fib.aibvo = 0;		/* each zdev has its own interrupt vector */
	/* this device's summary bit lives inside the shared aisb vector */
	fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
	fib.aisbo = zdev->aisb & 63;

	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
/* Modify PCI: Unregister adapter interruptions */
static int zpci_clear_airq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3 || (cc == 1 && status == 24))
		/* Function already gone or IRQs already deregistered. */
		cc = 0;

	return cc ? -EIO : 0;
}
/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 status;

	/* iota must be 16k aligned; low bits are reused for format flags */
	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;

	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	return cc ? -EIO : 0;
}
/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	/* refuse if already enabled or the hw fmb is larger than ours */
	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	/* the fmb address handed to hardware must be 16-byte aligned */
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		/* hardware rejected the fmb; give the allocation back */
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}
/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}
  197. static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
  198. {
  199. u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
  200. u64 data;
  201. int rc;
  202. rc = zpci_load(&data, req, offset);
  203. if (!rc) {
  204. data = le64_to_cpu((__force __le64) data);
  205. data >>= (8 - len) * 8;
  206. *val = (u32) data;
  207. } else
  208. *val = 0xffffffff;
  209. return rc;
  210. }
  211. static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
  212. {
  213. u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
  214. u64 data = val;
  215. int rc;
  216. data <<= (8 - len) * 8;
  217. data = (__force u64) cpu_to_le64(data);
  218. rc = zpci_store(data, req, offset);
  219. return rc;
  220. }
/* No additional alignment constraints on s390; resources are pre-assigned. */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}
/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}
/* Create a virtual mapping cookie for a PCI BAR */
void __iomem *pci_iomap_range(struct pci_dev *pdev,
			      int bar,
			      unsigned long offset,
			      unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	if (!pci_resource_len(pdev, bar))
		return NULL;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	/* returned cookie encodes the iomap index, not a CPU address */
	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}
EXPORT_SYMBOL(pci_iomap_range);
/* Map a whole BAR starting at offset 0; thin wrapper over pci_iomap_range. */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
/* Drop one reference on an iomap cookie; reset the slot when it hits zero. */
void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL(pci_iounmap);
  270. static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
  271. int size, u32 *val)
  272. {
  273. struct zpci_dev *zdev = get_zdev_by_bus(bus);
  274. int ret;
  275. if (!zdev || devfn != ZPCI_DEVFN)
  276. ret = -ENODEV;
  277. else
  278. ret = zpci_cfg_load(zdev, where, val, size);
  279. return ret;
  280. }
  281. static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
  282. int size, u32 val)
  283. {
  284. struct zpci_dev *zdev = get_zdev_by_bus(bus);
  285. int ret;
  286. if (!zdev || devfn != ZPCI_DEVFN)
  287. ret = -ENODEV;
  288. else
  289. ret = zpci_cfg_store(zdev, where, val, size);
  290. return ret;
  291. }
/* Config space accessors used for every zPCI root bus. */
static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
/*
 * Floating adapter interrupt handler for all zPCI devices.
 *
 * Scans the shared summary bit vector; each set summary bit identifies a
 * device whose own interrupt vector is then scanned for pending MSIs.
 * The summary vector is scanned twice: once with interrupts off and, after
 * re-enabling delivery via SIC, once more to close the race with bits set
 * in between.
 */
static void zpci_irq_handler(struct airq_struct *airq)
{
	unsigned long si, ai;
	struct airq_iv *aibv;
	int irqs_on = 0;

	inc_irq_stat(IRQIO_PCI);
	for (si = 0;;) {
		/* Scan adapter summary indicator bit vector */
		si = airq_iv_scan(zpci_aisb_iv, si, airq_iv_end(zpci_aisb_iv));
		if (si == -1UL) {
			if (irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, reenable interrupts. */
			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
				break;
			si = 0;
			continue;
		}

		/* Scan the adapter interrupt vector for this device. */
		aibv = zpci_aibv[si];
		for (ai = 0;;) {
			ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
			if (ai == -1UL)
				break;
			inc_irq_stat(IRQIO_MSI);
			/* per-bit lock serializes against teardown */
			airq_iv_lock(aibv, ai);
			generic_handle_irq(airq_iv_get_data(aibv, ai));
			airq_iv_unlock(aibv, ai);
		}
	}
}
/*
 * Arch hook: set up MSI/MSI-X interrupts for a PCI device.
 *
 * Allocates a summary indicator bit and a per-device interrupt vector,
 * wires one Linux irq per MSI entry, then registers the vectors with
 * hardware. Returns 0 on full success, a positive count if fewer vectors
 * than requested were set up, or a negative errno.
 * NOTE(review): error returns leave partial state behind — presumably the
 * generic MSI core then calls arch_teardown_msi_irqs to clean up; confirm.
 */
int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	unsigned int hwirq, msi_vecs;
	unsigned long aisb;
	struct msi_desc *msi;
	struct msi_msg msg;
	int rc, irq;

	zdev->aisb = -1UL;
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;	/* multi-MSI unsupported: ask core to retry with 1 */
	msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);

	/* Allocate adapter summary indicator bit */
	aisb = airq_iv_alloc_bit(zpci_aisb_iv);
	if (aisb == -1UL)
		return -EIO;
	zdev->aisb = aisb;

	/* Create adapter interrupt vector */
	zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
	if (!zdev->aibv)
		return -ENOMEM;

	/* Wire up shortcut pointer */
	zpci_aibv[aisb] = zdev->aibv;

	/* Request MSI interrupts */
	hwirq = 0;
	for_each_pci_msi_entry(msi, pdev) {
		if (hwirq >= msi_vecs)
			break;
		irq = irq_alloc_desc(0);	/* Alloc irq on node 0 */
		if (irq < 0)
			return -ENOMEM;
		rc = irq_set_msi_desc(irq, msi);
		if (rc)
			return rc;
		irq_set_chip_and_handler(irq, &zpci_irq_chip,
					 handle_simple_irq);
		/* MSI data selects the bit in this device's aibv */
		msg.data = hwirq;
		msg.address_lo = zdev->msi_addr & 0xffffffff;
		msg.address_hi = zdev->msi_addr >> 32;
		pci_write_msi_msg(irq, &msg);
		airq_iv_set_data(zdev->aibv, hwirq, irq);
		hwirq++;
	}

	/* Enable adapter interrupts */
	rc = zpci_set_airq(zdev);
	if (rc)
		return rc;

	return (msi_vecs == nvec) ? 0 : msi_vecs;
}
/*
 * Arch hook: tear down MSI/MSI-X interrupts for a PCI device.
 * Disables adapter interrupts first so no new MSIs arrive, then releases
 * each Linux irq and finally the summary bit and interrupt vector.
 */
void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct msi_desc *msi;
	int rc;

	/* Disable adapter interrupts */
	rc = zpci_clear_airq(zdev);
	if (rc)
		return;

	/* Release MSI interrupts */
	for_each_pci_msi_entry(msi, pdev) {
		if (!msi->irq)
			continue;
		/* mask the vector at the device before freeing the irq */
		if (msi->msi_attrib.is_msix)
			__pci_msix_desc_mask_irq(msi, 1);
		else
			__pci_msi_desc_mask_irq(msi, 1, 1);
		irq_set_msi_desc(msi->irq, NULL);
		irq_free_desc(msi->irq);
		msi->msg.address_lo = 0;
		msi->msg.address_hi = 0;
		msi->msg.data = 0;
		msi->irq = 0;
	}

	if (zdev->aisb != -1UL) {
		zpci_aibv[zdev->aisb] = NULL;
		airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
		zdev->aisb = -1UL;
	}
	if (zdev->aibv) {
		airq_iv_release(zdev->aibv);
		zdev->aibv = NULL;
	}
}
  411. static void zpci_map_resources(struct pci_dev *pdev)
  412. {
  413. resource_size_t len;
  414. int i;
  415. for (i = 0; i < PCI_BAR_COUNT; i++) {
  416. len = pci_resource_len(pdev, i);
  417. if (!len)
  418. continue;
  419. pdev->resource[i].start =
  420. (resource_size_t __force) pci_iomap(pdev, i, 0);
  421. pdev->resource[i].end = pdev->resource[i].start + len - 1;
  422. }
  423. }
  424. static void zpci_unmap_resources(struct pci_dev *pdev)
  425. {
  426. resource_size_t len;
  427. int i;
  428. for (i = 0; i < PCI_BAR_COUNT; i++) {
  429. len = pci_resource_len(pdev, i);
  430. if (!len)
  431. continue;
  432. pci_iounmap(pdev, (void __iomem __force *)
  433. pdev->resource[i].start);
  434. }
  435. }
/* Adapter interrupt registration for the PCI interruption subclass. */
static struct airq_struct zpci_airq = {
	.handler = zpci_irq_handler,
	.isc = PCI_ISC,
};
/*
 * Register the adapter interrupt handler, allocate the summary bit vector
 * and switch interrupt delivery on for the PCI ISC.
 */
static int __init zpci_irq_init(void)
{
	int rc;

	rc = register_adapter_interrupt(&zpci_airq);
	if (rc)
		goto out;
	/* Set summary to 1 to be called every time for the ISC. */
	*zpci_airq.lsi_ptr = 1;

	rc = -ENOMEM;
	zpci_aisb_iv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
	if (!zpci_aisb_iv)
		goto out_airq;

	zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
	return 0;

out_airq:
	unregister_adapter_interrupt(&zpci_airq);
out:
	return rc;
}
/* Reverse of zpci_irq_init: drop the summary vector and the airq handler. */
static void zpci_irq_exit(void)
{
	airq_iv_release(zpci_aisb_iv);
	unregister_adapter_interrupt(&zpci_airq);
}
  464. static int zpci_alloc_iomap(struct zpci_dev *zdev)
  465. {
  466. unsigned long entry;
  467. spin_lock(&zpci_iomap_lock);
  468. entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
  469. if (entry == ZPCI_IOMAP_ENTRIES) {
  470. spin_unlock(&zpci_iomap_lock);
  471. return -ENOSPC;
  472. }
  473. set_bit(entry, zpci_iomap_bitmap);
  474. spin_unlock(&zpci_iomap_lock);
  475. return entry;
  476. }
  477. static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
  478. {
  479. spin_lock(&zpci_iomap_lock);
  480. memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
  481. clear_bit(entry, zpci_iomap_bitmap);
  482. spin_unlock(&zpci_iomap_lock);
  483. }
  484. static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
  485. unsigned long size, unsigned long flags)
  486. {
  487. struct resource *r;
  488. r = kzalloc(sizeof(*r), GFP_KERNEL);
  489. if (!r)
  490. return NULL;
  491. r->start = start;
  492. r->end = r->start + size - 1;
  493. r->flags = flags;
  494. r->name = zdev->res_name;
  495. if (request_resource(&iomem_resource, r)) {
  496. kfree(r);
  497. return NULL;
  498. }
  499. return r;
  500. }
/*
 * Allocate an iomap slot and an iomem resource for every populated BAR and
 * add them to the root bus resource list. On failure the caller is expected
 * to run zpci_cleanup_bus_resources for already-created entries.
 */
static int zpci_setup_bus_resources(struct zpci_dev *zdev,
				    struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		/* resource address is the iomap cookie, not a bus address */
		addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;
		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}

	return 0;
}
/* Release the iomap slots and iomem resources created for each BAR. */
static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		if (!zdev->bars[i].size || !zdev->bars[i].res)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
}
/*
 * Arch hook called when a PCI device is added: install sysfs attribute
 * groups and the s390 DMA ops, map BARs and claim their resources.
 */
int pcibios_add_device(struct pci_dev *pdev)
{
	struct resource *res;
	int i;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}
/* Arch hook on device release: drop the BAR iomap references. */
void pcibios_release_device(struct pci_dev *pdev)
{
	zpci_unmap_resources(pdev);
}
/* Arch hook on enable: start debugfs/measurement, then enable resources. */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}
/* Arch hook on disable: stop function measurement and debug for the device. */
void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}
#ifdef CONFIG_HIBERNATE_CALLBACKS
/* Re-enable an online function and restore its DMA translation after resume. */
static int zpci_restore(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);
	int ret = 0;

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		goto out;

	ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (ret)
		goto out;

	zpci_map_resources(pdev);
	zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
			   (u64) zdev->dma_table);

out:
	return ret;
}

/* Quiesce an online function for hibernation: drop IOAT, unmap, disable. */
static int zpci_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		return 0;

	zpci_unregister_ioat(zdev, 0);
	zpci_unmap_resources(pdev);
	return clp_disable_fh(zdev);
}

struct dev_pm_ops pcibios_pm_ops = {
	.thaw_noirq = zpci_restore,
	.freeze_noirq = zpci_freeze,
	.restore_noirq = zpci_restore,
	.poweroff_noirq = zpci_freeze,
};
#endif /* CONFIG_HIBERNATE_CALLBACKS */
/*
 * Assign a PCI domain number to the device. With unique UIDs the UID is
 * used directly (and only tracked in the bitmap when it fits); otherwise
 * the lowest free number is allocated from the bitmap.
 */
static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	if (zpci_unique_uid) {
		zdev->domain = (u16) zdev->uid;
		/* out-of-range UIDs are used as-is and never bitmap-tracked */
		if (zdev->domain >= ZPCI_NR_DEVICES)
			return 0;

		spin_lock(&zpci_domain_lock);
		if (test_bit(zdev->domain, zpci_domain)) {
			spin_unlock(&zpci_domain_lock);
			return -EEXIST;
		}
		set_bit(zdev->domain, zpci_domain);
		spin_unlock(&zpci_domain_lock);
		return 0;
	}

	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}
/* Free a domain number; out-of-range (UID-derived) domains are not tracked. */
static void zpci_free_domain(struct zpci_dev *zdev)
{
	if (zdev->domain >= ZPCI_NR_DEVICES)
		return;

	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}
/*
 * Arch hook when a root bus goes away: tear down the hotplug slot, bus
 * resources, IOMMU and domain, unlink the device from the global list and
 * free it. Order matters: the zdev must leave the list before being freed.
 */
void pcibios_remove_bus(struct pci_bus *bus)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	zpci_exit_slot(zdev);
	zpci_cleanup_bus_resources(zdev);
	zpci_destroy_iommu(zdev);
	zpci_free_domain(zdev);

	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);

	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}
/*
 * Create resources and scan a root bus for one zpci device. On failure the
 * partially-created resources are released before returning.
 */
static int zpci_scan_bus(struct zpci_dev *zdev)
{
	LIST_HEAD(resources);
	int ret;

	ret = zpci_setup_bus_resources(zdev, &resources);
	if (ret)
		goto error;

	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus) {
		ret = -EIO;
		goto error;
	}
	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	pci_bus_add_devices(zdev->bus);
	return 0;

error:
	zpci_cleanup_bus_resources(zdev);
	pci_free_resource_list(&resources);
	return ret;
}
/*
 * Enable the function handle and initialize DMA for the device, moving it
 * to the ONLINE state. The function handle is disabled again if DMA init
 * fails.
 */
int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);
/* Tear down DMA and disable the function handle. */
int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
/*
 * Bring up a newly discovered zpci device: allocate a domain, set up the
 * IOMMU, enable it if already configured, scan its root bus, publish it on
 * the global list and register the hotplug slot. Each failure unwinds all
 * prior steps in reverse order.
 */
int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto out_free;

	mutex_init(&zdev->lock);
	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_destroy_iommu;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	zpci_init_slot(zdev);

	return 0;

out_disable:
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_destroy_iommu:
	zpci_destroy_iommu(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}
/*
 * Remove a device's root bus; the remaining teardown (list removal, free)
 * happens in pcibios_remove_bus, which the bus removal triggers.
 */
void zpci_remove_device(struct zpci_dev *zdev)
{
	if (!zdev->bus)
		return;

	pci_stop_root_bus(zdev->bus);
	pci_remove_root_bus(zdev->bus);
}
/* Forward an adapter error report to the service element via SCLP. */
int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);
/*
 * Allocate the fmb slab cache, the iomap table and its allocation bitmap.
 * All three are freed in reverse order on any failure.
 */
static int zpci_mem_init(void)
{
	/* fmb blocks must be naturally aligned for the hardware */
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}
/* Free everything allocated by zpci_mem_init. */
static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}
/* set via the "pci=off" kernel parameter; initialized state set at boot */
static unsigned int s390_pci_probe = 1;
static unsigned int s390_pci_initialized;

/* Parse pci= command-line options; "off" disables PCI probing entirely. */
char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	return str;	/* unrecognized option: hand back to generic code */
}
/* True once pci_base_init has completed successfully. */
bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}
/*
 * Subsystem init: verify the required machine facilities, then bring up
 * debug, memory, irq and dma infrastructure before scanning for devices.
 * Each failure unwinds the previously initialized stages in reverse order.
 */
static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	/* facility 69/71 carry the PCI instruction support */
	if (!test_facility(69) || !test_facility(71))
		return 0;

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);
/* Rescan firmware for PCI functions, if the subsystem came up at boot. */
void zpci_rescan(void)
{
	if (zpci_is_enabled())
		clp_rescan_pci_devices_simple();
}