/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/msi_bitmap.h>
#include <asm/pci-bridge.h> /* for struct pci_controller */
#include <asm/pnv-pci.h>
#include <asm/io.h>

#include "cxl.h"

#define CXL_PCI_VSEC_ID	0x1280
#define CXL_VSEC_MIN_SIZE 0x80

#define CXL_READ_VSEC_LENGTH(dev, vsec, dest)			\
{								\
	pci_read_config_word(dev, vsec + 0x6, dest);		\
	*dest >>= 4;						\
}
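/*
 * Illustrative sketch (not called as-is anywhere in this file): the VSEC
 * length lives in the top 12 bits of the dword at vsec + 0x4, i.e. the top
 * 12 bits of the word at vsec + 0x6, hence the read at +0x6 and the shift
 * by 4 in the macro above. For example:
 *
 *	u16 vseclen;
 *	CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
 *	if (vseclen < CXL_VSEC_MIN_SIZE)
 *		return -EINVAL;	// too short to be a valid CXL VSEC
 */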
#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x8, dest)

#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x9, dest)
#define CXL_STATUS_SECOND_PORT  0x80
#define CXL_STATUS_MSI_X_FULL   0x40
#define CXL_STATUS_MSI_X_SINGLE 0x20
#define CXL_STATUS_FLASH_RW     0x08
#define CXL_STATUS_FLASH_RO     0x04
#define CXL_STATUS_LOADABLE_AFU 0x02
#define CXL_STATUS_LOADABLE_PSL 0x01
/* If we see these features we won't try to use the card */
#define CXL_UNSUPPORTED_FEATURES \
	(CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)

#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0xa, val)
#define CXL_VSEC_PROTOCOL_MASK   0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB  0x40
#define CXL_VSEC_PROTOCOL_256TB  0x20 /* Power 8 uses this */
#define CXL_VSEC_PROTOCOL_ENABLE 0x01

#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0xc, dest)

#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xe, dest)
#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xf, dest)
#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0x10, dest)

#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x13, dest)
#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0x13, val)
#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */
#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */
#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */

#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x20, dest)
#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x24, dest)
#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x28, dest)
#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x2c, dest)
/* This works a little differently from the p1/p2 register accesses, to make
 * it easier to pull out individual fields */
#define AFUD_READ(afu, off)		in_be64(afu->afu_desc_mmio + off)
#define AFUD_READ_LE(afu, off)		in_le64(afu->afu_desc_mmio + off)
#define EXTRACT_PPC_BIT(val, bit)	(!!(val & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be)	((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
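/*
 * Note: PPC_BIT()/PPC_BITMASK() use IBM (MSB 0) bit numbering, so bit 0 is
 * the most significant bit of the 64-bit value. For example,
 * EXTRACT_PPC_BITS(val, 0, 15) yields the top 16 bits of val shifted down
 * into the low 16 bits, and EXTRACT_PPC_BIT(val, 63) tests the LSB.
 */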
#define AFUD_READ_INFO(afu)	AFUD_READ(afu, 0x0)
#define   AFUD_NUM_INTS_PER_PROC(val)	EXTRACT_PPC_BITS(val, 0, 15)
#define   AFUD_NUM_PROCS(val)		EXTRACT_PPC_BITS(val, 16, 31)
#define   AFUD_NUM_CRS(val)		EXTRACT_PPC_BITS(val, 32, 47)
#define   AFUD_MULTIMODE(val)		EXTRACT_PPC_BIT(val, 48)
#define   AFUD_PUSH_BLOCK_TRANSFER(val)	EXTRACT_PPC_BIT(val, 55)
#define   AFUD_DEDICATED_PROCESS(val)	EXTRACT_PPC_BIT(val, 59)
#define   AFUD_AFU_DIRECTED(val)	EXTRACT_PPC_BIT(val, 61)
#define   AFUD_TIME_SLICED(val)		EXTRACT_PPC_BIT(val, 63)
#define AFUD_READ_CR(afu)	AFUD_READ(afu, 0x20)
#define   AFUD_CR_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_CR_OFF(afu)	AFUD_READ(afu, 0x28)
#define AFUD_READ_PPPSA(afu)	AFUD_READ(afu, 0x30)
#define   AFUD_PPPSA_PP(val)		EXTRACT_PPC_BIT(val, 6)
#define   AFUD_PPPSA_PSA(val)		EXTRACT_PPC_BIT(val, 7)
#define   AFUD_PPPSA_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_PPPSA_OFF(afu)	AFUD_READ(afu, 0x38)
#define AFUD_READ_EB(afu)	AFUD_READ(afu, 0x40)
#define   AFUD_EB_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu)	AFUD_READ(afu, 0x48)
u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;

	val = cxl_afu_cr_read32(afu, cr, aligned_off);
	return (val >> ((off & 0x2) * 8)) & 0xffff;
}

u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;

	val = cxl_afu_cr_read32(afu, cr, aligned_off);
	return (val >> ((off & 0x3) * 8)) & 0xff;
}
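/*
 * Worked example (illustrative only): cxl_afu_cr_read8(afu, cr, 5) aligns
 * the offset down to 4, does a single 32-bit read of bytes 4-7 via
 * cxl_afu_cr_read32(), then shifts right by (5 & 0x3) * 8 = 8 bits and
 * masks to return byte 5. Sub-word reads are synthesized this way,
 * presumably because configuration record space is only accessed as
 * aligned 32-bit quantities.
 */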
static const struct pci_device_id cxl_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
	{ PCI_DEVICE_CLASS(0x120000, ~0), },
	{ }
};
MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);
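/*
 * The class entry above matches any function whose PCI class code is
 * exactly 0x120000 (base class 0x12, "Processing Accelerator"),
 * presumably so that CAIA-compliant devices with new IDs bind without a
 * table update.
 */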
/*
 * Mostly using these wrappers to avoid confusion:
 * priv 1 is BAR2, while priv 2 is BAR0
 */
static inline resource_size_t p1_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 2);
}

static inline resource_size_t p1_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 2);
}

static inline resource_size_t p2_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 0);
}

static inline resource_size_t p2_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 0);
}
static int find_cxl_vsec(struct pci_dev *dev)
{
	int vsec = 0;
	u16 val;

	while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_word(dev, vsec + 0x4, &val);
		if (val == CXL_PCI_VSEC_ID)
			return vsec;
	}
	return 0;
}
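/*
 * A device can expose several vendor-specific extended capabilities, so
 * find_cxl_vsec() walks them until the VSEC ID word matches
 * CXL_PCI_VSEC_ID. Typical usage, as sketched from the callers below:
 *
 *	int vsec = find_cxl_vsec(dev);
 *	if (!vsec)
 *		return -ENODEV;	// not a CXL-capable function
 */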
static void dump_cxl_config_space(struct pci_dev *dev)
{
	int vsec;
	u32 val;

	dev_info(&dev->dev, "dump_cxl_config_space\n");

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
	dev_info(&dev->dev, "BAR0: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
	dev_info(&dev->dev, "BAR1: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
	dev_info(&dev->dev, "BAR2: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
	dev_info(&dev->dev, "BAR3: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
	dev_info(&dev->dev, "BAR4: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
	dev_info(&dev->dev, "BAR5: %#.8x\n", val);

	dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
		p1_base(dev), p1_size(dev));
	dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
		p2_base(dev), p2_size(dev));
	dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
		pci_resource_start(dev, 4), pci_resource_len(dev, 4));

	if (!(vsec = find_cxl_vsec(dev)))
		return;

#define show_reg(name, what) \
	dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)

	pci_read_config_dword(dev, vsec + 0x0, &val);
	show_reg("Cap ID", (val >> 0) & 0xffff);
	show_reg("Cap Ver", (val >> 16) & 0xf);
	show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x4, &val);
	show_reg("VSEC ID", (val >> 0) & 0xffff);
	show_reg("VSEC Rev", (val >> 16) & 0xf);
	show_reg("VSEC Length", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x8, &val);
	show_reg("Num AFUs", (val >> 0) & 0xff);
	show_reg("Status", (val >> 8) & 0xff);
	show_reg("Mode Control", (val >> 16) & 0xff);
	show_reg("Reserved", (val >> 24) & 0xff);
	pci_read_config_dword(dev, vsec + 0xc, &val);
	show_reg("PSL Rev", (val >> 0) & 0xffff);
	show_reg("CAIA Ver", (val >> 16) & 0xffff);
	pci_read_config_dword(dev, vsec + 0x10, &val);
	show_reg("Base Image Rev", (val >> 0) & 0xffff);
	show_reg("Reserved", (val >> 16) & 0x0fff);
	show_reg("Image Control", (val >> 28) & 0x3);
	show_reg("Reserved", (val >> 30) & 0x1);
	show_reg("Image Loaded", (val >> 31) & 0x1);

	pci_read_config_dword(dev, vsec + 0x14, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x18, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x1c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x20, &val);
	show_reg("AFU Descriptor Offset", val);
	pci_read_config_dword(dev, vsec + 0x24, &val);
	show_reg("AFU Descriptor Size", val);
	pci_read_config_dword(dev, vsec + 0x28, &val);
	show_reg("Problem State Offset", val);
	pci_read_config_dword(dev, vsec + 0x2c, &val);
	show_reg("Problem State Size", val);

	pci_read_config_dword(dev, vsec + 0x30, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x34, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x38, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x3c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x40, &val);
	show_reg("PSL Programming Port", val);
	pci_read_config_dword(dev, vsec + 0x44, &val);
	show_reg("PSL Programming Control", val);

	pci_read_config_dword(dev, vsec + 0x48, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x4c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x50, &val);
	show_reg("Flash Address Register", val);
	pci_read_config_dword(dev, vsec + 0x54, &val);
	show_reg("Flash Size Register", val);
	pci_read_config_dword(dev, vsec + 0x58, &val);
	show_reg("Flash Status/Control Register", val);
	pci_read_config_dword(dev, vsec + 0x5c, &val);
	show_reg("Flash Data Port", val);
#undef show_reg
}
static void dump_afu_descriptor(struct cxl_afu *afu)
{
	u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
	int i;

#define show_reg(name, what) \
	dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)

	val = AFUD_READ_INFO(afu);
	show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
	show_reg("num_of_processes", AFUD_NUM_PROCS(val));
	show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
	show_reg("req_prog_mode", val & 0xffffULL);
	afu_cr_num = AFUD_NUM_CRS(val);

	val = AFUD_READ(afu, 0x8);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x10);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x18);
	show_reg("Reserved", val);

	val = AFUD_READ_CR(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_CR_len", AFUD_CR_LEN(val));
	afu_cr_len = AFUD_CR_LEN(val) * 256;

	val = AFUD_READ_CR_OFF(afu);
	afu_cr_off = val;
	show_reg("AFU_CR_offset", val);

	val = AFUD_READ_PPPSA(afu);
	show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
	show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));

	val = AFUD_READ_PPPSA_OFF(afu);
	show_reg("PerProcessPSA_offset", val);

	val = AFUD_READ_EB(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_EB_len", AFUD_EB_LEN(val));

	val = AFUD_READ_EB_OFF(afu);
	show_reg("AFU_EB_offset", val);

	for (i = 0; i < afu_cr_num; i++) {
		val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
		show_reg("CR Vendor", val & 0xffff);
		show_reg("CR Device", (val >> 16) & 0xffff);
	}
#undef show_reg
}
static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	struct device_node *np;
	const __be32 *prop;
	u64 psl_dsnctl;
	u64 chipid;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENODEV;

	while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
		np = of_get_next_parent(np);
	if (!np)
		return -ENODEV;
	chipid = be32_to_cpup(prop);
	of_node_put(np);

	/* Tell PSL where to route data to */
	psl_dsnctl = 0x02E8900002000000ULL | (chipid << (63-5));
	cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
	cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
	/* snoop write mask */
	cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
	/* set fir_accum */
	cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
	/* for debugging with trace arrays */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);

	return 0;
}
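/*
 * Note on the shift above: the register layout follows IBM bit numbering,
 * so "chipid << (63-5)" (i.e. << 58) places the chip id in PPC bits 0-5 of
 * CXL_PSL_DSNDCTL. For instance, chipid 1 would OR in 0x0400000000000000.
 */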
static int init_implementation_afu_regs(struct cxl_afu *afu)
{
	/* read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
	/* APC read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
	/* for debugging with trace arrays */
	cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
	cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);

	return 0;
}

int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
		unsigned int virq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
}
int cxl_update_image_control(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;
	int vsec;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
		dev_err(&dev->dev, "failed to read image state: %i\n", rc);
		return rc;
	}

	if (adapter->perst_loads_image)
		image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
	else
		image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;

	if (adapter->perst_select_user)
		image_state |= CXL_VSEC_PERST_SELECT_USER;
	else
		image_state &= ~CXL_VSEC_PERST_SELECT_USER;

	if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
		dev_err(&dev->dev, "failed to update image control: %i\n", rc);
		return rc;
	}

	return 0;
}
int cxl_alloc_one_irq(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirqs(dev, 1);
}

void cxl_release_one_irq(struct cxl *adapter, int hwirq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	pnv_cxl_release_hwirqs(dev, hwirq, 1);
}

int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
}

void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	pnv_cxl_release_hwirq_ranges(irqs, dev);
}
static int setup_cxl_bars(struct pci_dev *dev)
{
	/* Safety check in case we get backported to < 3.17 without M64 */
	if ((p1_base(dev) < 0x100000000ULL) ||
	    (p2_base(dev) < 0x100000000ULL)) {
		dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
		return -ENODEV;
	}

	/*
	 * BAR 4/5 has a special meaning for CXL and must be programmed with a
	 * special value corresponding to the CXL protocol address range.
	 * For POWER 8 that means bits 48:49 must be set to 10
	 */
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);

	return 0;
}
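/*
 * To make the arithmetic above concrete: BAR4/5 form a single 64-bit BAR,
 * with BAR5 holding address bits 63:32. Writing 0x00020000 to BAR5 sets
 * bit 17 of the high dword, i.e. address bit 49, so the two-bit field at
 * address bits 49:48 reads 0b10 as required (the 0 written to BAR4 clears
 * the low dword).
 */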
/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
static int switch_card_to_cxl(struct pci_dev *dev)
{
	int vsec;
	u8 val;
	int rc;

	dev_info(&dev->dev, "switch card to CXL\n");

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
		dev_err(&dev->dev, "failed to read current mode control: %i\n", rc);
		return rc;
	}
	val &= ~CXL_VSEC_PROTOCOL_MASK;
	val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
	if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
		dev_err(&dev->dev, "failed to enable CXL protocol: %i\n", rc);
		return rc;
	}

	/*
	 * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
	 * we must wait 100ms after this mode switch before touching
	 * PCIe config space.
	 */
	msleep(100);

	return 0;
}
static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	u64 p1n_base, p2n_base, afu_desc;
	const u64 p1n_size = 0x100;
	const u64 p2n_size = 0x1000;

	p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
	p2n_base = p2_base(dev) + (afu->slice * p2n_size);
	afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size));
	afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size);

	if (!(afu->p1n_mmio = ioremap(p1n_base, p1n_size)))
		goto err;
	if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
		goto err1;
	if (afu_desc) {
		if (!(afu->afu_desc_mmio = ioremap(afu_desc, adapter->afu_desc_size)))
			goto err2;
	}

	return 0;
err2:
	iounmap(afu->p2n_mmio);
err1:
	iounmap(afu->p1n_mmio);
err:
	dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
	return -ENOMEM;
}
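/*
 * Per-slice MMIO layout implied by the arithmetic above (illustrative):
 * slice n's p1 registers live at p1_base + 0x10000 + n * 0x100 and its
 * p2 registers at p2_base + n * 0x1000, so e.g. slice 2 maps its
 * 0x100-byte p1n window at p1_base + 0x10200.
 */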
static void cxl_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio)
		iounmap(afu->p2n_mmio);
	if (afu->p1n_mmio)
		iounmap(afu->p1n_mmio);
}
static void cxl_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("cxl_release_afu\n");

	kfree(afu);
}

static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
{
	struct cxl_afu *afu;

	if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
		return NULL;

	afu->adapter = adapter;
	afu->dev.parent = &adapter->dev;
	afu->dev.release = cxl_release_afu;
	afu->slice = slice;
	idr_init(&afu->contexts_idr);
	mutex_init(&afu->contexts_lock);
	spin_lock_init(&afu->afu_cntl_lock);
	mutex_init(&afu->spa_mutex);

	afu->prefault_mode = CXL_PREFAULT_NONE;
	afu->irqs_max = afu->adapter->user_irqs;

	return afu;
}
/* Expects AFU struct to have recently been zeroed out */
static int cxl_read_afu_descriptor(struct cxl_afu *afu)
{
	u64 val;

	val = AFUD_READ_INFO(afu);
	afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
	afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
	afu->crs_num = AFUD_NUM_CRS(val);

	if (AFUD_AFU_DIRECTED(val))
		afu->modes_supported |= CXL_MODE_DIRECTED;
	if (AFUD_DEDICATED_PROCESS(val))
		afu->modes_supported |= CXL_MODE_DEDICATED;
	if (AFUD_TIME_SLICED(val))
		afu->modes_supported |= CXL_MODE_TIME_SLICED;

	val = AFUD_READ_PPPSA(afu);
	afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
	afu->psa = AFUD_PPPSA_PSA(val);
	if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
		afu->pp_offset = AFUD_READ_PPPSA_OFF(afu);

	val = AFUD_READ_CR(afu);
	afu->crs_len = AFUD_CR_LEN(val) * 256;
	afu->crs_offset = AFUD_READ_CR_OFF(afu);

	/* eb_len is in multiples of 4K */
	afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
	afu->eb_offset = AFUD_READ_EB_OFF(afu);

	/* eb_offset is 4K aligned, so the lower 12 bits are always zero
	 * (PPC bits 52-63 are the low-order 12 bits) */
	if (EXTRACT_PPC_BITS(afu->eb_offset, 52, 63) != 0) {
		dev_warn(&afu->dev,
			 "Invalid AFU error buffer offset %Lx\n",
			 afu->eb_offset);
		dev_info(&afu->dev,
			 "Ignoring AFU error buffer in the descriptor\n");
		/* indicate that no AFU error buffer exists */
		afu->eb_len = 0;
	}

	return 0;
}
static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
	int i;

	if (afu->psa && afu->adapter->ps_size <
			(afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
		dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
		return -ENODEV;
	}

	if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
		dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!\n");

	for (i = 0; i < afu->crs_num; i++) {
		if (cxl_afu_cr_read32(afu, i, 0) == 0) {
			dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
			return -EINVAL;
		}
	}

	return 0;
}
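/*
 * Example of the fit check above (numbers are illustrative): with
 * pp_offset = 64K, pp_size = 64K and max_procs_virtualised = 16, the
 * per-process areas end at 64K + 64K * 16 = 1088K, so ps_size must be at
 * least that or the descriptor is rejected.
 */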
static int sanitise_afu_regs(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * Clear out any regs that contain either an IVTE or address or may be
	 * waiting on an acknowledgement to try to be a bit safer as we bring
	 * it online
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#.16llx\n", reg);
		if (__cxl_afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}

	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);

	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#.16llx\n", reg);
		if (reg & CXL_PSL_DSISR_TRANS)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}

	reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	if (reg) {
		if (reg & ~0xffff)
			dev_warn(&afu->dev, "AFU had pending SERR: %#.16llx\n", reg);
		cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
	}

	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#.16llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE

/*
 * afu_eb_read:
 * Called from sysfs and reads the AFU error info buffer. The hardware only
 * supports 4/8-byte aligned access, so if the requested offset/count aren't
 * 8-byte aligned the function uses a bounce buffer of at most PAGE_SIZE.
 */
ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
				loff_t off, size_t count)
{
	loff_t aligned_start, aligned_end;
	size_t aligned_length;
	void *tbuf;
	const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset;

	if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
		return 0;

	/* calculate aligned read window */
	count = min((size_t)(afu->eb_len - off), count);
	aligned_start = round_down(off, 8);
	aligned_end = round_up(off + count, 8);
	aligned_length = aligned_end - aligned_start;

	/* max we can copy in one read is PAGE_SIZE */
	if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
		aligned_length = ERR_BUFF_MAX_COPY_SIZE;
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	}

	/* use bounce buffer for copy */
	tbuf = (void *)__get_free_page(GFP_TEMPORARY);
	if (!tbuf)
		return -ENOMEM;

	/* perform aligned read from the mmio region */
	memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
	memcpy(buf, tbuf + (off & 0x7), count);

	free_page((unsigned long)tbuf);

	return count;
}
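/*
 * Worked example of the windowing above (illustrative): a request with
 * off = 5 and count = 7 rounds to aligned_start = 0, aligned_end = 16,
 * aligned_length = 16; the 16 aligned bytes land in tbuf, and the 7 bytes
 * the caller asked for are copied out of tbuf + (5 & 0x7) = tbuf + 5.
 */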
static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
	struct cxl_afu *afu;
	bool free = true;
	int rc;

	if (!(afu = cxl_alloc_afu(adapter, slice)))
		return -ENOMEM;

	if ((rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice)))
		goto err1;

	if ((rc = cxl_map_slice_regs(afu, adapter, dev)))
		goto err1;

	if ((rc = sanitise_afu_regs(afu)))
		goto err2;

	/* We need to reset the AFU before we can read the AFU descriptor */
	if ((rc = __cxl_afu_reset(afu)))
		goto err2;

	if (cxl_verbose)
		dump_afu_descriptor(afu);

	if ((rc = cxl_read_afu_descriptor(afu)))
		goto err2;

	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
		goto err2;

	if ((rc = init_implementation_afu_regs(afu)))
		goto err2;

	if ((rc = cxl_register_serr_irq(afu)))
		goto err2;

	if ((rc = cxl_register_psl_irq(afu)))
		goto err3;

	/* Don't care if this fails */
	cxl_debugfs_afu_add(afu);

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	if ((rc = cxl_afu_select_best_mode(afu)))
		goto err_put2;

	adapter->afu[afu->slice] = afu;

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put2:
	cxl_sysfs_afu_remove(afu);
err_put1:
	device_unregister(&afu->dev);
	free = false;
	cxl_debugfs_afu_remove(afu);
	cxl_release_psl_irq(afu);
err3:
	cxl_release_serr_irq(afu);
err2:
	cxl_unmap_slice_regs(afu);
err1:
	if (free)
		kfree(afu);
	return rc;
}
static void cxl_remove_afu(struct cxl_afu *afu)
{
	pr_devel("cxl_remove_afu\n");

	if (!afu)
		return;

	cxl_sysfs_afu_remove(afu);
	cxl_debugfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_afu_deactivate_mode(afu);

	cxl_release_psl_irq(afu);
	cxl_release_serr_irq(afu);
	cxl_unmap_slice_regs(afu);

	device_unregister(&afu->dev);
}
int cxl_reset(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;
	int i;
	u32 val;

	dev_info(&dev->dev, "CXL reset\n");

	for (i = 0; i < adapter->slices; i++) {
		cxl_pci_vphb_remove(adapter->afu[i]);
		cxl_remove_afu(adapter->afu[i]);
	}

	/* pcie_warm_reset requests a fundamental pci reset which includes a
	 * PERST assert/deassert. PERST triggers a loading of the image
	 * if "user" or "factory" is selected in sysfs */
	if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
		dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
		return rc;
	}

	/* the PERST done above fences the PHB. So, reset depends on EEH
	 * to unbind the driver, tell Sapphire to reinit the PHB, and rebind
	 * the driver. Do an mmio read explicitly to ensure EEH notices the
	 * fenced PHB. Retry for a few seconds before giving up. */
	i = 0;
	while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) &&
	       (i < 5)) {
		msleep(500);
		i++;
	}

	if (val != 0xffffffff)
		dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n");

	return rc;
}
static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	/* per the wrappers above: priv 1 is BAR2, priv 2 is BAR0 */
	if (pci_request_region(dev, 2, "priv 1 regs"))
		goto err1;
	if (pci_request_region(dev, 0, "priv 2 regs"))
		goto err2;

	pr_devel("cxl_map_adapter_regs: p1: %#.16llx %#llx, p2: %#.16llx %#llx",
			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));

	if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
		goto err3;

	if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
		goto err4;

	return 0;

err4:
	iounmap(adapter->p1_mmio);
	adapter->p1_mmio = NULL;
err3:
	pci_release_region(dev, 0);
err2:
	pci_release_region(dev, 2);
err1:
	return -ENOMEM;
}
static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
	if (adapter->p1_mmio)
		iounmap(adapter->p1_mmio);
	if (adapter->p2_mmio)
		iounmap(adapter->p2_mmio);
}
static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
{
	int vsec;
	u32 afu_desc_off, afu_desc_size;
	u32 ps_off, ps_size;
	u16 vseclen;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
	if (vseclen < CXL_VSEC_MIN_SIZE) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
		return -EINVAL;
	}

	CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
	CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
	CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
	CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	adapter->perst_loads_image = true;
	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);

	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
	CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
	CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
	CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
	CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);

	/* Convert everything to bytes, because there is NO WAY I'd look at the
	 * code a month later and forget what units these are in ;-) */
	adapter->ps_off = ps_off * 64 * 1024;
	adapter->ps_size = ps_size * 64 * 1024;
	adapter->afu_desc_off = afu_desc_off * 64 * 1024;
	adapter->afu_desc_size = afu_desc_size * 64 * 1024;

	/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
	adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;

	return 0;
}
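/*
 * Units example (illustrative numbers): the VSEC encodes offsets/sizes in
 * 64kB units, so a raw ps_off of 0x10 becomes 0x10 * 64 * 1024 = 1MB. The
 * IRQ budget works the same way: a card reporting 2048 hwirqs with 2
 * slices leaves 2048 - 1 - 2*2 = 2043 interrupts for userspace contexts.
 */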
static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
{
	if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
		return -EBUSY;

	if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
		dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
		return -EINVAL;
	}

	if (!adapter->slices) {
		/* Once we support dynamic reprogramming we can use the card if
		 * it supports loadable AFUs */
		dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
		return -EINVAL;
	}

	if (!adapter->afu_desc_off || !adapter->afu_desc_size) {
		dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
		return -EINVAL;
	}

	if (adapter->ps_size > p2_size(dev) - adapter->ps_off) {
		dev_err(&dev->dev, "ABORTING: Problem state size larger than "
				   "available in BAR0: 0x%llx > 0x%llx\n",
			adapter->ps_size, p2_size(dev) - adapter->ps_off);
		return -EINVAL;
	}

	return 0;
}
static void cxl_release_adapter(struct device *dev)
{
	struct cxl *adapter = to_cxl_adapter(dev);

	pr_devel("cxl_release_adapter\n");

	kfree(adapter);
}

static struct cxl *cxl_alloc_adapter(struct pci_dev *dev)
{
	struct cxl *adapter;

	if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
		return NULL;

	adapter->dev.parent = &dev->dev;
	adapter->dev.release = cxl_release_adapter;
	pci_set_drvdata(dev, adapter);
	spin_lock_init(&adapter->afu_list_lock);

	return adapter;
}

static int sanitise_adapter_regs(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	return cxl_tlb_slb_invalidate(adapter);
}
static struct cxl *cxl_init_adapter(struct pci_dev *dev)
{
	struct cxl *adapter;
	bool free = true;
	int rc;

	if (!(adapter = cxl_alloc_adapter(dev)))
		return ERR_PTR(-ENOMEM);

	if ((rc = cxl_read_vsec(adapter, dev)))
		goto err1;

	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
		goto err1;

	if ((rc = setup_cxl_bars(dev)))
		goto err1;

	if ((rc = switch_card_to_cxl(dev)))
		goto err1;

	if ((rc = cxl_alloc_adapter_nr(adapter)))
		goto err1;

	if ((rc = dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)))
		goto err2;

	if ((rc = cxl_update_image_control(adapter)))
		goto err2;

	if ((rc = cxl_map_adapter_regs(adapter, dev)))
		goto err2;

	/* the regs are mapped from here on, so unwind through err3 */
	if ((rc = sanitise_adapter_regs(adapter)))
		goto err3;

	if ((rc = init_implementation_adapter_regs(adapter, dev)))
		goto err3;

	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
		goto err3;

	/* If recovery happened, the last step is to turn on snooping.
	 * In the non-recovery case this has no effect */
	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
		goto err3;

	if ((rc = cxl_register_psl_err_irq(adapter)))
		goto err3;

	/* Don't care if this one fails: */
	cxl_debugfs_adapter_add(adapter);

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	return adapter;

err_put1:
	device_unregister(&adapter->dev);
	free = false;
	cxl_debugfs_adapter_remove(adapter);
	cxl_release_psl_err_irq(adapter);
err3:
	cxl_unmap_adapter_regs(adapter);
err2:
	cxl_remove_adapter_nr(adapter);
err1:
	if (free)
		kfree(adapter);
	return ERR_PTR(rc);
}
static void cxl_remove_adapter(struct cxl *adapter)
{
	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);

	pr_devel("cxl_remove_adapter\n");

	cxl_sysfs_adapter_remove(adapter);
	cxl_debugfs_adapter_remove(adapter);
	cxl_release_psl_err_irq(adapter);
	cxl_unmap_adapter_regs(adapter);
	cxl_remove_adapter_nr(adapter);
	device_unregister(&adapter->dev);

	pci_release_region(pdev, 0);
	pci_release_region(pdev, 2);
	pci_disable_device(pdev);
}
static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct cxl *adapter;
	int slice;
	int rc;

	pci_dev_get(dev);

	if (cxl_verbose)
		dump_cxl_config_space(dev);

	if ((rc = pci_enable_device(dev))) {
		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
		return rc;
	}

	adapter = cxl_init_adapter(dev);
	if (IS_ERR(adapter)) {
		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
		pci_disable_device(dev);
		return PTR_ERR(adapter);
	}

	for (slice = 0; slice < adapter->slices; slice++) {
		if ((rc = cxl_init_afu(adapter, slice, dev)))
			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
	}

	return 0;
}
static void cxl_remove(struct pci_dev *dev)
{
	struct cxl *adapter = pci_get_drvdata(dev);
	struct cxl_afu *afu;
	int i;

	/*
	 * Lock to prevent someone grabbing a ref through the adapter list as
	 * we are removing it
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];
		cxl_pci_vphb_remove(afu);
		cxl_remove_afu(afu);
	}
	cxl_remove_adapter(adapter);
}

struct pci_driver cxl_pci_driver = {
	.name = "cxl-pci",
	.id_table = cxl_pci_tbl,
	.probe = cxl_probe,
	.remove = cxl_remove,
	.shutdown = cxl_remove,
};