thunderx_edac.c 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173
  1. /*
  2. * Cavium ThunderX memory controller kernel module
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Copyright Cavium, Inc. (C) 2015-2017. All rights reserved.
  9. *
  10. */
  11. #include <linux/module.h>
  12. #include <linux/pci.h>
  13. #include <linux/edac.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/string.h>
  16. #include <linux/stop_machine.h>
  17. #include <linux/delay.h>
  18. #include <linux/sizes.h>
  19. #include <linux/atomic.h>
  20. #include <linux/bitfield.h>
  21. #include <linux/circ_buf.h>
  22. #include <asm/page.h>
  23. #include "edac_module.h"
  24. #define phys_to_pfn(phys) (PFN_DOWN(phys))
  25. #define THUNDERX_NODE GENMASK(45, 44)
/* Severity classes used by the error-decode tables below. */
enum {
	ERR_CORRECTED	= 1,
	ERR_UNCORRECTED	= 2,
	ERR_UNKNOWN	= 3,
};

#define MAX_SYNDROME_REGS 4

/* Raw syndrome register values captured alongside an error report. */
struct error_syndrome {
	u64 reg[MAX_SYNDROME_REGS];
};

/* One decodable error bit: severity class, bit mask within the status
 * register, and its human-readable description.  Tables of these are
 * terminated by an all-zero sentinel (see decode_register()). */
struct error_descr {
	int type;	/* ERR_CORRECTED / ERR_UNCORRECTED / ERR_UNKNOWN */
	u64 mask;	/* bit(s) in the interrupt/status register */
	char *descr;	/* text appended to the report */
};
  40. static void decode_register(char *str, size_t size,
  41. const struct error_descr *descr,
  42. const uint64_t reg)
  43. {
  44. int ret = 0;
  45. while (descr->type && descr->mask && descr->descr) {
  46. if (reg & descr->mask) {
  47. ret = snprintf(str, size, "\n\t%s, %s",
  48. descr->type == ERR_CORRECTED ?
  49. "Corrected" : "Uncorrected",
  50. descr->descr);
  51. str += ret;
  52. size -= ret;
  53. }
  54. descr++;
  55. }
  56. }
  57. static unsigned long get_bits(unsigned long data, int pos, int width)
  58. {
  59. return (data >> pos) & ((1 << width) - 1);
  60. }
  61. #define L2C_CTL 0x87E080800000
  62. #define L2C_CTL_DISIDXALIAS BIT(0)
  63. #define PCI_DEVICE_ID_THUNDER_LMC 0xa022
  64. #define LMC_FADR 0x20
  65. #define LMC_FADR_FDIMM(x) ((x >> 37) & 0x1)
  66. #define LMC_FADR_FBUNK(x) ((x >> 36) & 0x1)
  67. #define LMC_FADR_FBANK(x) ((x >> 32) & 0xf)
  68. #define LMC_FADR_FROW(x) ((x >> 14) & 0xffff)
  69. #define LMC_FADR_FCOL(x) ((x >> 0) & 0x1fff)
  70. #define LMC_NXM_FADR 0x28
  71. #define LMC_ECC_SYND 0x38
  72. #define LMC_ECC_PARITY_TEST 0x108
  73. #define LMC_INT_W1S 0x150
  74. #define LMC_INT_ENA_W1C 0x158
  75. #define LMC_INT_ENA_W1S 0x160
  76. #define LMC_CONFIG 0x188
  77. #define LMC_CONFIG_BG2 BIT(62)
  78. #define LMC_CONFIG_RANK_ENA BIT(42)
  79. #define LMC_CONFIG_PBANK_LSB(x) (((x) >> 5) & 0xF)
  80. #define LMC_CONFIG_ROW_LSB(x) (((x) >> 2) & 0x7)
  81. #define LMC_CONTROL 0x190
  82. #define LMC_CONTROL_XOR_BANK BIT(16)
  83. #define LMC_INT 0x1F0
  84. #define LMC_INT_DDR_ERR BIT(11)
  85. #define LMC_INT_DED_ERR (0xFUL << 5)
  86. #define LMC_INT_SEC_ERR (0xFUL << 1)
  87. #define LMC_INT_NXM_WR_MASK BIT(0)
  88. #define LMC_DDR_PLL_CTL 0x258
  89. #define LMC_DDR_PLL_CTL_DDR4 BIT(29)
  90. #define LMC_FADR_SCRAMBLED 0x330
  91. #define LMC_INT_UE (LMC_INT_DDR_ERR | LMC_INT_DED_ERR | \
  92. LMC_INT_NXM_WR_MASK)
  93. #define LMC_INT_CE (LMC_INT_SEC_ERR)
/* Decode table for the LMC_INT status bits, consumed by
 * decode_register(); terminated by the all-zero sentinel. */
static const struct error_descr lmc_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = LMC_INT_SEC_ERR,
		.descr = "Single-bit ECC error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = LMC_INT_DDR_ERR,
		.descr = "DDR chip error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = LMC_INT_DED_ERR,
		.descr = "Double-bit ECC error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = LMC_INT_NXM_WR_MASK,
		.descr = "Non-existent memory write",
	},
	{0, 0, NULL},
};
  117. #define LMC_INT_EN_DDR_ERROR_ALERT_ENA BIT(5)
  118. #define LMC_INT_EN_DLCRAM_DED_ERR BIT(4)
  119. #define LMC_INT_EN_DLCRAM_SEC_ERR BIT(3)
  120. #define LMC_INT_INTR_DED_ENA BIT(2)
  121. #define LMC_INT_INTR_SEC_ENA BIT(1)
  122. #define LMC_INT_INTR_NXM_WR_ENA BIT(0)
  123. #define LMC_INT_ENA_ALL GENMASK(5, 0)
  124. #define LMC_DDR_PLL_CTL 0x258
  125. #define LMC_DDR_PLL_CTL_DDR4 BIT(29)
  126. #define LMC_CONTROL 0x190
  127. #define LMC_CONTROL_RDIMM BIT(0)
  128. #define LMC_SCRAM_FADR 0x330
  129. #define LMC_CHAR_MASK0 0x228
  130. #define LMC_CHAR_MASK2 0x238
#define RING_ENTRIES 8

/* One debugfs attribute: node name, permissions and file operations. */
struct debugfs_entry {
	const char *name;
	umode_t mode;
	const struct file_operations fops;
};

/* Snapshot of the LMC error registers, captured in hard-IRQ context
 * and consumed by the threaded handler. */
struct lmc_err_ctx {
	u64 reg_int;		/* LMC_INT at the time of the error */
	u64 reg_fadr;		/* failing address (LMC_FADR) */
	u64 reg_nxm_fadr;	/* non-existent-memory failing address */
	u64 reg_scram_fadr;	/* scrambled failing address */
	u64 reg_ecc_synd;	/* ECC syndrome (LMC_ECC_SYND) */
};

/* Per-controller driver state, stored in mci->pvt_info. */
struct thunderx_lmc {
	void __iomem *regs;	/* BAR0 register window */
	struct pci_dev *pdev;
	struct msix_entry msix_ent;

	atomic_t ecc_int;	/* set by the ISR once an ECC error fired */

	u64 mask0;		/* debugfs-settable injection mask (CHAR_MASK0) */
	u64 mask2;		/* debugfs-settable injection mask (CHAR_MASK2) */
	u64 parity_test;	/* debugfs-settable LMC_ECC_PARITY_TEST value */
	u64 node;		/* node ID, decoded from the BAR address */

	/* Address-decode geometry, derived from LMC_CONFIG et al. in probe. */
	int xbits;
	int bank_width;
	int pbank_lsb;
	int dimm_lsb;
	int rank_lsb;
	int bank_lsb;
	int row_lsb;
	int col_hi_lsb;

	int xor_bank;		/* bank-XOR hashing enabled (LMC_CONTROL) */
	int l2c_alias;		/* L2C index aliasing enabled (L2C_CTL) */

	struct page *mem;	/* target page for ECC injection */

	/* Single-producer (hard IRQ) / single-consumer (thread) error ring. */
	struct lmc_err_ctx err_ctx[RING_ENTRIES];
	unsigned long ring_head;
	unsigned long ring_tail;
};

/* Map a free-running ring counter to a slot; size must be a power of two. */
#define ring_pos(pos, size) ((pos) & (size - 1))
/*
 * Define a struct debugfs_entry named debugfs_<_name>, wiring the given
 * read/write handlers into simple_open-based file operations.
 */
#define DEBUGFS_STRUCT(_name, _mode, _write, _read)			\
static struct debugfs_entry debugfs_##_name = {				\
	.name = __stringify(_name),					\
	.mode = VERIFY_OCTAL_PERMISSIONS(_mode),			\
	.fops = {							\
		.open = simple_open,					\
		.write = _write,					\
		.read = _read,						\
		.llseek = generic_file_llseek,				\
	},								\
}
  180. #define DEBUGFS_FIELD_ATTR(_type, _field) \
  181. static ssize_t thunderx_##_type##_##_field##_read(struct file *file, \
  182. char __user *data, \
  183. size_t count, loff_t *ppos) \
  184. { \
  185. struct thunderx_##_type *pdata = file->private_data; \
  186. char buf[20]; \
  187. \
  188. snprintf(buf, count, "0x%016llx", pdata->_field); \
  189. return simple_read_from_buffer(data, count, ppos, \
  190. buf, sizeof(buf)); \
  191. } \
  192. \
  193. static ssize_t thunderx_##_type##_##_field##_write(struct file *file, \
  194. const char __user *data, \
  195. size_t count, loff_t *ppos) \
  196. { \
  197. struct thunderx_##_type *pdata = file->private_data; \
  198. int res; \
  199. \
  200. res = kstrtoull_from_user(data, count, 0, &pdata->_field); \
  201. \
  202. return res ? res : count; \
  203. } \
  204. \
  205. DEBUGFS_STRUCT(_field, 0600, \
  206. thunderx_##_type##_##_field##_write, \
  207. thunderx_##_type##_##_field##_read) \
/*
 * Generate debugfs read/write handlers for a hardware register at offset
 * @_reg in the device's register window, plus the matching debugfs_entry.
 * The sprintf() is safe: "0x%016llx" is a fixed 18 characters + NUL,
 * which fits the 20-byte buffer.
 */
#define DEBUGFS_REG_ATTR(_type, _name, _reg)				\
static ssize_t thunderx_##_type##_##_name##_read(struct file *file,	\
						 char __user *data,	\
						 size_t count, loff_t *ppos) \
{									\
	struct thunderx_##_type *pdata = file->private_data;		\
	char buf[20];							\
									\
	sprintf(buf, "0x%016llx", readq(pdata->regs + _reg));		\
	return simple_read_from_buffer(data, count, ppos,		\
				       buf, sizeof(buf));		\
}									\
									\
static ssize_t thunderx_##_type##_##_name##_write(struct file *file,	\
						  const char __user *data, \
						  size_t count, loff_t *ppos) \
{									\
	struct thunderx_##_type *pdata = file->private_data;		\
	u64 val;							\
	int res;							\
									\
	res = kstrtoull_from_user(data, count, 0, &val);		\
									\
	if (!res) {							\
		writeq(val, pdata->regs + _reg);			\
		res = count;						\
	}								\
									\
	return res;							\
}									\
									\
DEBUGFS_STRUCT(_name, 0600,						\
	       thunderx_##_type##_##_name##_write,			\
	       thunderx_##_type##_##_name##_read)
  242. #define LMC_DEBUGFS_ENT(_field) DEBUGFS_FIELD_ATTR(lmc, _field)
  243. /*
  244. * To get an ECC error injected, the following steps are needed:
  245. * - Setup the ECC injection by writing the appropriate parameters:
  246. * echo <bit mask value> > /sys/kernel/debug/<device number>/ecc_mask0
  247. * echo <bit mask value> > /sys/kernel/debug/<device number>/ecc_mask2
  248. * echo 0x802 > /sys/kernel/debug/<device number>/ecc_parity_test
  249. * - Do the actual injection:
  250. * echo 1 > /sys/kernel/debug/<device number>/inject_ecc
  251. */
/*
 * debugfs write handler: inject an LMC interrupt.
 *
 * The user-supplied value is written to LMC_INT_W1S (write-one-to-set),
 * which latches the corresponding interrupt bits in hardware.  Returns
 * the number of bytes consumed, or a negative errno on bad input.
 */
static ssize_t thunderx_lmc_inject_int_write(struct file *file,
					     const char __user *data,
					     size_t count, loff_t *ppos)
{
	struct thunderx_lmc *lmc = file->private_data;
	u64 val;
	int res;

	res = kstrtoull_from_user(data, count, 0, &val);

	if (!res) {
		/* Trigger the interrupt */
		writeq(val, lmc->regs + LMC_INT_W1S);
		res = count;
	}

	return res;
}
/*
 * debugfs read handler: return the current interrupt status (LMC_INT)
 * formatted as a zero-padded hex string.
 */
static ssize_t thunderx_lmc_int_read(struct file *file,
				     char __user *data,
				     size_t count, loff_t *ppos)
{
	struct thunderx_lmc *lmc = file->private_data;
	char buf[20];
	u64 lmc_int = readq(lmc->regs + LMC_INT);

	snprintf(buf, sizeof(buf), "0x%016llx", lmc_int);
	return simple_read_from_buffer(data, count, ppos, buf, sizeof(buf));
}
#define TEST_PATTERN 0xa5

/*
 * Runs under stop_machine(): arm the ECC-injection (characterization)
 * registers, then force the target page out of L1 and L2 to DRAM so
 * that the next read back picks up the rigged ECC error.
 * Always returns 0 (stop_machine callback convention).
 */
static int inject_ecc_fn(void *arg)
{
	struct thunderx_lmc *lmc = arg;
	uintptr_t addr, phys;
	unsigned int cline_size = cache_line_size();
	const unsigned int lines = PAGE_SIZE / cline_size;
	unsigned int i, cl_idx;

	addr = (uintptr_t)page_address(lmc->mem);
	phys = (uintptr_t)page_to_phys(lmc->mem);

	/* Select the target cacheline index in bits [10:8] of
	 * LMC_ECC_PARITY_TEST, derived from the page's physical address. */
	cl_idx = (phys & 0x7f) >> 4;
	lmc->parity_test &= ~(7ULL << 8);
	lmc->parity_test |= (cl_idx << 8);

	writeq(lmc->mask0, lmc->regs + LMC_CHAR_MASK0);
	writeq(lmc->mask2, lmc->regs + LMC_CHAR_MASK2);
	writeq(lmc->parity_test, lmc->regs + LMC_ECC_PARITY_TEST);

	/* Read back so the posted writes reach the controller before the
	 * cache maintenance below. */
	readq(lmc->regs + LMC_CHAR_MASK0);
	readq(lmc->regs + LMC_CHAR_MASK2);
	readq(lmc->regs + LMC_ECC_PARITY_TEST);

	for (i = 0; i < lines; i++) {
		/* NOTE(review): this rewrites the first cacheline of the
		 * page on every iteration; confirm whether
		 * addr + i * cline_size was intended. */
		memset((void *)addr, TEST_PATTERN, cline_size);
		barrier();

		/*
		 * Flush L1 cachelines to the PoC (L2).
		 * This will cause cacheline eviction to the L2.
		 */
		asm volatile("dc civac, %0\n"
			     "dsb sy\n"
			     : : "r"(addr + i * cline_size));
	}

	for (i = 0; i < lines; i++) {
		/*
		 * Flush L2 cachelines to the DRAM.
		 * This will cause cacheline eviction to the DRAM
		 * and ECC corruption according to the masks set.
		 */
		__asm__ volatile("sys #0,c11,C1,#2, %0\n"
				 : : "r"(phys + i * cline_size));
	}

	for (i = 0; i < lines; i++) {
		/*
		 * Invalidate L2 cachelines.
		 * The subsequent load will cause cacheline fetch
		 * from the DRAM and an error interrupt
		 */
		__asm__ volatile("sys #0,c11,C1,#1, %0"
				 : : "r"(phys + i * cline_size));
	}

	for (i = 0; i < lines; i++) {
		/*
		 * Invalidate L1 cachelines.
		 * The subsequent load will cause cacheline fetch
		 * from the L2 and/or DRAM
		 */
		asm volatile("dc ivac, %0\n"
			     "dsb sy\n"
			     : : "r"(addr + i * cline_size));
	}

	return 0;
}
  337. static ssize_t thunderx_lmc_inject_ecc_write(struct file *file,
  338. const char __user *data,
  339. size_t count, loff_t *ppos)
  340. {
  341. struct thunderx_lmc *lmc = file->private_data;
  342. unsigned int cline_size = cache_line_size();
  343. u8 tmp[cline_size];
  344. void __iomem *addr;
  345. unsigned int offs, timeout = 100000;
  346. atomic_set(&lmc->ecc_int, 0);
  347. lmc->mem = alloc_pages_node(lmc->node, GFP_KERNEL, 0);
  348. if (!lmc->mem)
  349. return -ENOMEM;
  350. addr = page_address(lmc->mem);
  351. while (!atomic_read(&lmc->ecc_int) && timeout--) {
  352. stop_machine(inject_ecc_fn, lmc, NULL);
  353. for (offs = 0; offs < PAGE_SIZE; offs += sizeof(tmp)) {
  354. /*
  355. * Do a load from the previously rigged location
  356. * This should generate an error interrupt.
  357. */
  358. memcpy(tmp, addr + offs, cline_size);
  359. asm volatile("dsb ld\n");
  360. }
  361. }
  362. __free_pages(lmc->mem, 0);
  363. return count;
  364. }
/* Instantiate the debugfs handlers for the injection parameters and
 * control files. */
LMC_DEBUGFS_ENT(mask0);
LMC_DEBUGFS_ENT(mask2);
LMC_DEBUGFS_ENT(parity_test);

DEBUGFS_STRUCT(inject_int, 0200, thunderx_lmc_inject_int_write, NULL);
DEBUGFS_STRUCT(inject_ecc, 0200, thunderx_lmc_inject_ecc_write, NULL);
DEBUGFS_STRUCT(int_w1c, 0400, NULL, thunderx_lmc_int_read);

/* debugfs attributes created per LMC device (see probe). */
struct debugfs_entry *lmc_dfs_ents[] = {
	&debugfs_mask0,
	&debugfs_mask2,
	&debugfs_parity_test,
	&debugfs_inject_ecc,
	&debugfs_inject_int,
	&debugfs_int_w1c,
};
  379. static int thunderx_create_debugfs_nodes(struct dentry *parent,
  380. struct debugfs_entry *attrs[],
  381. void *data,
  382. size_t num)
  383. {
  384. int i;
  385. struct dentry *ent;
  386. if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
  387. return 0;
  388. if (!parent)
  389. return -ENOENT;
  390. for (i = 0; i < num; i++) {
  391. ent = edac_debugfs_create_file(attrs[i]->name, attrs[i]->mode,
  392. parent, data, &attrs[i]->fops);
  393. if (!ent)
  394. break;
  395. }
  396. return i;
  397. }
/*
 * Reconstruct a physical address from the fields of the LMC failing
 * address register, undoing the controller's DIMM/rank/bank/row/column
 * mapping and the optional bank-XOR and L2C index-alias hashing set up
 * in probe.
 */
static phys_addr_t thunderx_faddr_to_phys(u64 faddr, struct thunderx_lmc *lmc)
{
	phys_addr_t addr = 0;
	int bank, xbits;

	/* Node ID sits above bit 40 of the physical address. */
	addr |= lmc->node << 40;
	addr |= LMC_FADR_FDIMM(faddr) << lmc->dimm_lsb;
	addr |= LMC_FADR_FBUNK(faddr) << lmc->rank_lsb;
	addr |= LMC_FADR_FROW(faddr) << lmc->row_lsb;
	/* Only the high column bits map above col_hi_lsb. */
	addr |= (LMC_FADR_FCOL(faddr) >> 4) << lmc->col_hi_lsb;

	/*
	 * NOTE(review): "bank" is shifted by bank_lsb here and again when
	 * OR-ed into addr below — confirm against the ThunderX HRM whether
	 * this double shift is intentional.
	 */
	bank = LMC_FADR_FBANK(faddr) << lmc->bank_lsb;

	if (lmc->xor_bank)
		bank ^= get_bits(addr, 12 + lmc->xbits, lmc->bank_width);

	addr |= bank << lmc->bank_lsb;

	/* Undo the L2C index-alias hash using this LMC's function number. */
	xbits = PCI_FUNC(lmc->pdev->devfn);

	if (lmc->l2c_alias)
		xbits ^= get_bits(addr, 20, lmc->xbits) ^
			 get_bits(addr, 12, lmc->xbits);

	addr |= xbits << 7;

	return addr;
}
  418. static unsigned int thunderx_get_num_lmcs(unsigned int node)
  419. {
  420. unsigned int number = 0;
  421. struct pci_dev *pdev = NULL;
  422. do {
  423. pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
  424. PCI_DEVICE_ID_THUNDER_LMC,
  425. pdev);
  426. if (pdev) {
  427. #ifdef CONFIG_NUMA
  428. if (pdev->dev.numa_node == node)
  429. number++;
  430. #else
  431. number++;
  432. #endif
  433. }
  434. } while (pdev);
  435. return number;
  436. }
  437. #define LMC_MESSAGE_SIZE 120
  438. #define LMC_OTHER_SIZE (50 * ARRAY_SIZE(lmc_errors))
/*
 * Hard-IRQ half of the LMC error handler: disable further injection,
 * snapshot the error registers into the next ring-buffer slot, ack the
 * interrupt and wake the threaded handler for decoding/reporting.
 */
static irqreturn_t thunderx_lmc_err_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct thunderx_lmc *lmc = mci->pvt_info;

	unsigned long head = ring_pos(lmc->ring_head, ARRAY_SIZE(lmc->err_ctx));
	struct lmc_err_ctx *ctx = &lmc->err_ctx[head];

	/* Stop any in-progress error injection. */
	writeq(0, lmc->regs + LMC_CHAR_MASK0);
	writeq(0, lmc->regs + LMC_CHAR_MASK2);
	writeq(0x2, lmc->regs + LMC_ECC_PARITY_TEST);

	/* Capture the full error context before acking. */
	ctx->reg_int = readq(lmc->regs + LMC_INT);
	ctx->reg_fadr = readq(lmc->regs + LMC_FADR);
	ctx->reg_nxm_fadr = readq(lmc->regs + LMC_NXM_FADR);
	ctx->reg_scram_fadr = readq(lmc->regs + LMC_SCRAM_FADR);
	ctx->reg_ecc_synd = readq(lmc->regs + LMC_ECC_SYND);

	lmc->ring_head++;

	/* Let the injection loop know the error actually fired. */
	atomic_set(&lmc->ecc_int, 1);

	/* Clear the interrupt */
	writeq(ctx->reg_int, lmc->regs + LMC_INT);

	return IRQ_WAKE_THREAD;
}
/*
 * Threaded half of the LMC error handler: drain the ring buffer filled
 * by the hard-IRQ half, decode each register snapshot and report it to
 * the EDAC core as a corrected or uncorrected memory error at the
 * reconstructed physical address.
 */
static irqreturn_t thunderx_lmc_threaded_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct thunderx_lmc *lmc = mci->pvt_info;
	phys_addr_t phys_addr;

	unsigned long tail;
	struct lmc_err_ctx *ctx;

	irqreturn_t ret = IRQ_NONE;

	char *msg;
	char *other;

	msg = kmalloc(LMC_MESSAGE_SIZE, GFP_KERNEL);
	other = kmalloc(LMC_OTHER_SIZE, GFP_KERNEL);

	if (!msg || !other)
		goto err_free;

	/* Consume every snapshot the hard-IRQ half has queued. */
	while (CIRC_CNT(lmc->ring_head, lmc->ring_tail,
		ARRAY_SIZE(lmc->err_ctx))) {
		tail = ring_pos(lmc->ring_tail, ARRAY_SIZE(lmc->err_ctx));

		ctx = &lmc->err_ctx[tail];

		dev_dbg(&lmc->pdev->dev, "LMC_INT: %016llx\n",
			ctx->reg_int);
		dev_dbg(&lmc->pdev->dev, "LMC_FADR: %016llx\n",
			ctx->reg_fadr);
		dev_dbg(&lmc->pdev->dev, "LMC_NXM_FADR: %016llx\n",
			ctx->reg_nxm_fadr);
		dev_dbg(&lmc->pdev->dev, "LMC_SCRAM_FADR: %016llx\n",
			ctx->reg_scram_fadr);
		dev_dbg(&lmc->pdev->dev, "LMC_ECC_SYND: %016llx\n",
			ctx->reg_ecc_synd);

		/* Location string from the (scrambled) failing address. */
		snprintf(msg, LMC_MESSAGE_SIZE,
			 "DIMM %lld rank %lld bank %lld row %lld col %lld",
			 LMC_FADR_FDIMM(ctx->reg_scram_fadr),
			 LMC_FADR_FBUNK(ctx->reg_scram_fadr),
			 LMC_FADR_FBANK(ctx->reg_scram_fadr),
			 LMC_FADR_FROW(ctx->reg_scram_fadr),
			 LMC_FADR_FCOL(ctx->reg_scram_fadr));

		decode_register(other, LMC_OTHER_SIZE, lmc_errors,
				ctx->reg_int);

		phys_addr = thunderx_faddr_to_phys(ctx->reg_fadr, lmc);

		if (ctx->reg_int & LMC_INT_UE)
			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
					     phys_to_pfn(phys_addr),
					     offset_in_page(phys_addr),
					     0, -1, -1, -1, msg, other);
		else if (ctx->reg_int & LMC_INT_CE)
			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
					     phys_to_pfn(phys_addr),
					     offset_in_page(phys_addr),
					     0, -1, -1, -1, msg, other);

		lmc->ring_tail++;
	}

	ret = IRQ_HANDLED;

err_free:
	kfree(msg);
	kfree(other);

	return ret;
}
#ifdef CONFIG_PM
/* Legacy PCI PM: save config space and power the device down. */
static int thunderx_lmc_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

/* Legacy PCI PM: power the device back up and restore config space. */
static int thunderx_lmc_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	return 0;
}
#endif
  531. static const struct pci_device_id thunderx_lmc_pci_tbl[] = {
  532. { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_LMC) },
  533. { 0, },
  534. };
  535. static inline int pci_dev_to_mc_idx(struct pci_dev *pdev)
  536. {
  537. int node = dev_to_node(&pdev->dev);
  538. int ret = PCI_FUNC(pdev->devfn);
  539. ret += max(node, 0) << 3;
  540. return ret;
  541. }
/*
 * Probe one LMC PCI function: enable and map the device, allocate an
 * EDAC memory-controller instance, derive the address-decode geometry
 * from the controller's configuration registers, register the MSI-X
 * error interrupt and optionally create the debugfs injection nodes.
 */
static int thunderx_lmc_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct thunderx_lmc *lmc;
	struct edac_mc_layer layer;
	struct mem_ctl_info *mci;
	u64 lmc_control, lmc_ddr_pll_ctl, lmc_config;
	int ret;
	u64 lmc_int;
	void *l2c_ioaddr;

	layer.type = EDAC_MC_LAYER_SLOT;
	layer.size = 2;
	layer.is_virt_csrow = false;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_lmc");
	if (ret) {
		dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
		return ret;
	}

	mci = edac_mc_alloc(pci_dev_to_mc_idx(pdev), 1, &layer,
			    sizeof(struct thunderx_lmc));
	if (!mci)
		return -ENOMEM;

	mci->pdev = &pdev->dev;
	lmc = mci->pvt_info;

	pci_set_drvdata(pdev, mci);

	lmc->regs = pcim_iomap_table(pdev)[0];

	lmc_control = readq(lmc->regs + LMC_CONTROL);
	lmc_ddr_pll_ctl = readq(lmc->regs + LMC_DDR_PLL_CTL);
	lmc_config = readq(lmc->regs + LMC_CONFIG);

	/* Report the memory type: (R)DIMM x DDR3/DDR4. */
	if (lmc_control & LMC_CONTROL_RDIMM) {
		mci->mtype_cap = FIELD_GET(LMC_DDR_PLL_CTL_DDR4,
					   lmc_ddr_pll_ctl) ?
				MEM_RDDR4 : MEM_RDDR3;
	} else {
		mci->mtype_cap = FIELD_GET(LMC_DDR_PLL_CTL_DDR4,
					   lmc_ddr_pll_ctl) ?
				MEM_DDR4 : MEM_DDR3;
	}

	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;

	mci->mod_name = "thunderx-lmc";
	mci->ctl_name = "thunderx-lmc";
	mci->dev_name = dev_name(&pdev->dev);
	mci->scrub_mode = SCRUB_NONE;

	lmc->pdev = pdev;
	lmc->msix_ent.entry = 0;

	lmc->ring_head = 0;
	lmc->ring_tail = 0;

	ret = pci_enable_msix_exact(pdev, &lmc->msix_ent, 1);
	if (ret) {
		dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
		goto err_free;
	}

	ret = devm_request_threaded_irq(&pdev->dev, lmc->msix_ent.vector,
					thunderx_lmc_err_isr,
					thunderx_lmc_threaded_isr, 0,
					"[EDAC] ThunderX LMC", mci);
	if (ret) {
		dev_err(&pdev->dev, "Cannot set ISR: %d\n", ret);
		goto err_free;
	}

	/* The node ID is encoded in the BAR's physical address. */
	lmc->node = FIELD_GET(THUNDERX_NODE, pci_resource_start(pdev, 0));

	lmc->xbits = thunderx_get_num_lmcs(lmc->node) >> 1;
	lmc->bank_width = (FIELD_GET(LMC_DDR_PLL_CTL_DDR4, lmc_ddr_pll_ctl) &&
			   FIELD_GET(LMC_CONFIG_BG2, lmc_config)) ? 4 : 3;

	/* Bit positions of the DIMM/rank/bank/row/column address fields,
	 * used later by thunderx_faddr_to_phys(). */
	lmc->pbank_lsb = (lmc_config >> 5) & 0xf;
	lmc->dimm_lsb = 28 + lmc->pbank_lsb + lmc->xbits;
	lmc->rank_lsb = lmc->dimm_lsb;
	lmc->rank_lsb -= FIELD_GET(LMC_CONFIG_RANK_ENA, lmc_config) ? 1 : 0;
	lmc->bank_lsb = 7 + lmc->xbits;
	lmc->row_lsb = 14 + LMC_CONFIG_ROW_LSB(lmc_config) + lmc->xbits;

	lmc->col_hi_lsb = lmc->bank_lsb + lmc->bank_width;

	lmc->xor_bank = lmc_control & LMC_CONTROL_XOR_BANK;

	/* Check whether L2C index aliasing is enabled on this node. */
	l2c_ioaddr = ioremap(L2C_CTL | FIELD_PREP(THUNDERX_NODE, lmc->node), PAGE_SIZE);
	if (!l2c_ioaddr) {
		dev_err(&pdev->dev, "Cannot map L2C_CTL\n");
		ret = -ENOMEM;
		goto err_free;
	}

	lmc->l2c_alias = !(readq(l2c_ioaddr) & L2C_CTL_DISIDXALIAS);

	iounmap(l2c_ioaddr);

	ret = edac_mc_add_mc(mci);
	if (ret) {
		dev_err(&pdev->dev, "Cannot add the MC: %d\n", ret);
		goto err_free;
	}

	/* Ack any stale interrupt state, then enable all error interrupts. */
	lmc_int = readq(lmc->regs + LMC_INT);
	writeq(lmc_int, lmc->regs + LMC_INT);

	writeq(LMC_INT_ENA_ALL, lmc->regs + LMC_INT_ENA_W1S);

	if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
		ret = thunderx_create_debugfs_nodes(mci->debugfs,
						    lmc_dfs_ents,
						    lmc,
						    ARRAY_SIZE(lmc_dfs_ents));

		if (ret != ARRAY_SIZE(lmc_dfs_ents)) {
			dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
				 ret, ret >= 0 ? " created" : "");
		}
	}

	return 0;

err_free:
	pci_set_drvdata(pdev, NULL);
	edac_mc_free(mci);

	return ret;
}
/* Teardown: mask all LMC error interrupts, then unregister and free
 * the EDAC memory-controller instance. */
static void thunderx_lmc_remove(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci = pci_get_drvdata(pdev);
	struct thunderx_lmc *lmc = mci->pvt_info;

	writeq(LMC_INT_ENA_ALL, lmc->regs + LMC_INT_ENA_W1C);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
}
  660. MODULE_DEVICE_TABLE(pci, thunderx_lmc_pci_tbl);
  661. static struct pci_driver thunderx_lmc_driver = {
  662. .name = "thunderx_lmc_edac",
  663. .probe = thunderx_lmc_probe,
  664. .remove = thunderx_lmc_remove,
  665. #ifdef CONFIG_PM
  666. .suspend = thunderx_lmc_suspend,
  667. .resume = thunderx_lmc_resume,
  668. #endif
  669. .id_table = thunderx_lmc_pci_tbl,
  670. };
  671. /*---------------------- OCX driver ---------------------------------*/
  672. #define PCI_DEVICE_ID_THUNDER_OCX 0xa013
  673. #define OCX_LINK_INTS 3
  674. #define OCX_INTS (OCX_LINK_INTS + 1)
  675. #define OCX_RX_LANES 24
  676. #define OCX_RX_LANE_STATS 15
  677. #define OCX_COM_INT 0x100
  678. #define OCX_COM_INT_W1S 0x108
  679. #define OCX_COM_INT_ENA_W1S 0x110
  680. #define OCX_COM_INT_ENA_W1C 0x118
  681. #define OCX_COM_IO_BADID BIT(54)
  682. #define OCX_COM_MEM_BADID BIT(53)
  683. #define OCX_COM_COPR_BADID BIT(52)
  684. #define OCX_COM_WIN_REQ_BADID BIT(51)
  685. #define OCX_COM_WIN_REQ_TOUT BIT(50)
  686. #define OCX_COM_RX_LANE GENMASK(23, 0)
  687. #define OCX_COM_INT_CE (OCX_COM_IO_BADID | \
  688. OCX_COM_MEM_BADID | \
  689. OCX_COM_COPR_BADID | \
  690. OCX_COM_WIN_REQ_BADID | \
  691. OCX_COM_WIN_REQ_TOUT)
/* Decode table for the OCX_COM_INT error bits (all classed as
 * corrected); terminated by the all-zero sentinel for decode_register(). */
static const struct error_descr ocx_com_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_IO_BADID,
		.descr = "Invalid IO transaction node ID",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_MEM_BADID,
		.descr = "Invalid memory transaction node ID",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_COPR_BADID,
		.descr = "Invalid coprocessor transaction node ID",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_WIN_REQ_BADID,
		.descr = "Invalid SLI transaction node ID",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_WIN_REQ_TOUT,
		.descr = "Window/core request timeout",
	},
	{0, 0, NULL},
};
/* Per-link interrupt registers; x is the link number (0..2). */
#define OCX_COM_LINKX_INT(x) (0x120 + (x) * 8)
#define OCX_COM_LINKX_INT_W1S(x) (0x140 + (x) * 8)
#define OCX_COM_LINKX_INT_ENA_W1S(x) (0x160 + (x) * 8)
#define OCX_COM_LINKX_INT_ENA_W1C(x) (0x180 + (x) * 8)
/* OCX_COM_LINKX_INT cause bits. */
#define OCX_COM_LINK_BAD_WORD BIT(13)
#define OCX_COM_LINK_ALIGN_FAIL BIT(12)
#define OCX_COM_LINK_ALIGN_DONE BIT(11)
#define OCX_COM_LINK_UP BIT(10)
#define OCX_COM_LINK_STOP BIT(9)
#define OCX_COM_LINK_BLK_ERR BIT(8)
#define OCX_COM_LINK_REINIT BIT(7)
#define OCX_COM_LINK_LNK_DATA BIT(6)
#define OCX_COM_LINK_RXFIFO_DBE BIT(5)
#define OCX_COM_LINK_RXFIFO_SBE BIT(4)
#define OCX_COM_LINK_TXFIFO_DBE BIT(3)
#define OCX_COM_LINK_TXFIFO_SBE BIT(2)
#define OCX_COM_LINK_REPLAY_DBE BIT(1)
#define OCX_COM_LINK_REPLAY_SBE BIT(0)

/*
 * Decode table for OCX_COM_LINKX_INT: single-bit (corrected) causes first,
 * then double-bit/fatal (uncorrected) causes; terminated by a NULL descr.
 */
static const struct error_descr ocx_com_link_errors[] = {
	{
		.type = ERR_CORRECTED,
		.mask = OCX_COM_LINK_REPLAY_SBE,
		.descr = "Replay buffer single-bit error",
	},
	{
		.type = ERR_CORRECTED,
		.mask = OCX_COM_LINK_TXFIFO_SBE,
		.descr = "TX FIFO single-bit error",
	},
	{
		.type = ERR_CORRECTED,
		.mask = OCX_COM_LINK_RXFIFO_SBE,
		.descr = "RX FIFO single-bit error",
	},
	{
		.type = ERR_CORRECTED,
		.mask = OCX_COM_LINK_BLK_ERR,
		.descr = "Block code error",
	},
	{
		.type = ERR_CORRECTED,
		.mask = OCX_COM_LINK_ALIGN_FAIL,
		.descr = "Link alignment failure",
	},
	{
		.type = ERR_CORRECTED,
		.mask = OCX_COM_LINK_BAD_WORD,
		.descr = "Bad code word",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = OCX_COM_LINK_REPLAY_DBE,
		.descr = "Replay buffer double-bit error",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = OCX_COM_LINK_TXFIFO_DBE,
		.descr = "TX FIFO double-bit error",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = OCX_COM_LINK_RXFIFO_DBE,
		.descr = "RX FIFO double-bit error",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = OCX_COM_LINK_STOP,
		.descr = "Link stopped",
	},
	{0, 0, NULL},
};
/* Link causes reported as uncorrected (UE) vs corrected (CE) errors. */
#define OCX_COM_LINK_INT_UE (OCX_COM_LINK_REPLAY_DBE | \
			     OCX_COM_LINK_TXFIFO_DBE | \
			     OCX_COM_LINK_RXFIFO_DBE | \
			     OCX_COM_LINK_STOP)
#define OCX_COM_LINK_INT_CE (OCX_COM_LINK_REPLAY_SBE | \
			     OCX_COM_LINK_TXFIFO_SBE | \
			     OCX_COM_LINK_RXFIFO_SBE | \
			     OCX_COM_LINK_BLK_ERR | \
			     OCX_COM_LINK_ALIGN_FAIL | \
			     OCX_COM_LINK_BAD_WORD)

/* Per-lane registers; x is the lane (0..23), y a statistics index. */
#define OCX_LNE_INT(x) (0x8018 + (x) * 0x100)
#define OCX_LNE_INT_EN(x) (0x8020 + (x) * 0x100)
#define OCX_LNE_BAD_CNT(x) (0x8028 + (x) * 0x100)
#define OCX_LNE_CFG(x) (0x8000 + (x) * 0x100)
#define OCX_LNE_STAT(x, y) (0x8040 + (x) * 0x100 + (y) * 8)
/* OCX_LNE_CFG control bits (used by thunderx_ocx_clearstats()). */
#define OCX_LNE_CFG_RX_BDRY_LOCK_DIS BIT(8)
#define OCX_LNE_CFG_RX_STAT_WRAP_DIS BIT(2)
#define OCX_LNE_CFG_RX_STAT_RDCLR BIT(1)
#define OCX_LNE_CFG_RX_STAT_ENA BIT(0)
/* OCX_LNE_INT cause bits. */
#define OCX_LANE_BAD_64B67B BIT(8)
#define OCX_LANE_DSKEW_FIFO_OVFL BIT(5)
#define OCX_LANE_SCRM_SYNC_LOSS BIT(4)
#define OCX_LANE_UKWN_CNTL_WORD BIT(3)
#define OCX_LANE_CRC32_ERR BIT(2)
#define OCX_LANE_BDRY_SYNC_LOSS BIT(1)
#define OCX_LANE_SERDES_LOCK_LOSS BIT(0)
/* No lane cause is fatal; all are reported as corrected errors. */
#define OCX_COM_LANE_INT_UE (0)
#define OCX_COM_LANE_INT_CE (OCX_LANE_SERDES_LOCK_LOSS | \
			     OCX_LANE_BDRY_SYNC_LOSS | \
			     OCX_LANE_CRC32_ERR | \
			     OCX_LANE_UKWN_CNTL_WORD | \
			     OCX_LANE_SCRM_SYNC_LOSS | \
			     OCX_LANE_DSKEW_FIFO_OVFL | \
			     OCX_LANE_BAD_64B67B)

/* Decode table for OCX_LNE_INT; terminated by a NULL descr. */
static const struct error_descr ocx_lane_errors[] = {
	{
		.type = ERR_CORRECTED,
		.mask = OCX_LANE_SERDES_LOCK_LOSS,
		.descr = "RX SerDes lock lost",
	},
	{
		.type = ERR_CORRECTED,
		.mask = OCX_LANE_BDRY_SYNC_LOSS,
		.descr = "RX word boundary lost",
	},
	{
		.type = ERR_CORRECTED,
		.mask = OCX_LANE_CRC32_ERR,
		.descr = "CRC32 error",
	},
	{
		.type = ERR_CORRECTED,
		.mask = OCX_LANE_UKWN_CNTL_WORD,
		.descr = "Unknown control word",
	},
	{
		.type = ERR_CORRECTED,
		.mask = OCX_LANE_SCRM_SYNC_LOSS,
		.descr = "Scrambler synchronization lost",
	},
	{
		.type = ERR_CORRECTED,
		.mask = OCX_LANE_DSKEW_FIFO_OVFL,
		.descr = "RX deskew FIFO overflow",
	},
	{
		.type = ERR_CORRECTED,
		.mask = OCX_LANE_BAD_64B67B,
		.descr = "Bad 64B/67B codeword",
	},
	{0, 0, NULL},
};
/* "Enable everything" masks written to the *_ENA_W1S/W1C registers. */
#define OCX_LNE_INT_ENA_ALL (GENMASK(9, 8) | GENMASK(6, 0))
#define OCX_COM_INT_ENA_ALL (GENMASK(54, 50) | GENMASK(23, 0))
#define OCX_COM_LINKX_INT_ENA_ALL (GENMASK(13, 12) | \
				   GENMASK(9, 7) | GENMASK(5, 0))

/* TX/RX link ECC control registers (exposed only via debugfs). */
#define OCX_TLKX_ECC_CTL(x) (0x10018 + (x) * 0x2000)
#define OCX_RLKX_ECC_CTL(x) (0x18018 + (x) * 0x2000)

/* Snapshot of one COM interrupt, captured in hard-IRQ context. */
struct ocx_com_err_ctx {
	u64 reg_com_int;			/* OCX_COM_INT at IRQ time */
	u64 reg_lane_int[OCX_RX_LANES];		/* per-lane OCX_LNE_INT */
	u64 reg_lane_stat11[OCX_RX_LANES];	/* per-lane OCX_LNE_STAT(x, 11) */
};

/* Snapshot of one per-link interrupt, captured in hard-IRQ context. */
struct ocx_link_err_ctx {
	u64 reg_com_link_int;	/* OCX_COM_LINKX_INT(link) at IRQ time */
	int link;		/* link number == MSI-X entry number */
};

/* Per-device state for one OCX EDAC instance. */
struct thunderx_ocx {
	void __iomem *regs;	/* BAR0 mapping */
	int com_link;
	struct pci_dev *pdev;
	struct edac_device_ctl_info *edac_dev;
	struct dentry *debugfs;
	struct msix_entry msix_ent[OCX_INTS];
	/*
	 * Lock-free single-producer/single-consumer rings: the hard ISR
	 * advances *_ring_head, the threaded ISR advances *_ring_tail.
	 */
	struct ocx_com_err_ctx com_err_ctx[RING_ENTRIES];
	struct ocx_link_err_ctx link_err_ctx[RING_ENTRIES];
	unsigned long com_ring_head;
	unsigned long com_ring_tail;
	unsigned long link_ring_head;
	unsigned long link_ring_tail;
};

/* Scratch buffer sizes used when formatting error messages. */
#define OCX_MESSAGE_SIZE SZ_1K
#define OCX_OTHER_SIZE (50 * ARRAY_SIZE(ocx_com_link_errors))
  894. /* This handler is threaded */
  895. static irqreturn_t thunderx_ocx_com_isr(int irq, void *irq_id)
  896. {
  897. struct msix_entry *msix = irq_id;
  898. struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
  899. msix_ent[msix->entry]);
  900. int lane;
  901. unsigned long head = ring_pos(ocx->com_ring_head,
  902. ARRAY_SIZE(ocx->com_err_ctx));
  903. struct ocx_com_err_ctx *ctx = &ocx->com_err_ctx[head];
  904. ctx->reg_com_int = readq(ocx->regs + OCX_COM_INT);
  905. for (lane = 0; lane < OCX_RX_LANES; lane++) {
  906. ctx->reg_lane_int[lane] =
  907. readq(ocx->regs + OCX_LNE_INT(lane));
  908. ctx->reg_lane_stat11[lane] =
  909. readq(ocx->regs + OCX_LNE_STAT(lane, 11));
  910. writeq(ctx->reg_lane_int[lane], ocx->regs + OCX_LNE_INT(lane));
  911. }
  912. writeq(ctx->reg_com_int, ocx->regs + OCX_COM_INT);
  913. ocx->com_ring_head++;
  914. return IRQ_WAKE_THREAD;
  915. }
  916. static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
  917. {
  918. struct msix_entry *msix = irq_id;
  919. struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
  920. msix_ent[msix->entry]);
  921. irqreturn_t ret = IRQ_NONE;
  922. unsigned long tail;
  923. struct ocx_com_err_ctx *ctx;
  924. int lane;
  925. char *msg;
  926. char *other;
  927. msg = kmalloc(OCX_MESSAGE_SIZE, GFP_KERNEL);
  928. other = kmalloc(OCX_OTHER_SIZE, GFP_KERNEL);
  929. if (!msg || !other)
  930. goto err_free;
  931. while (CIRC_CNT(ocx->com_ring_head, ocx->com_ring_tail,
  932. ARRAY_SIZE(ocx->com_err_ctx))) {
  933. tail = ring_pos(ocx->com_ring_tail,
  934. ARRAY_SIZE(ocx->com_err_ctx));
  935. ctx = &ocx->com_err_ctx[tail];
  936. snprintf(msg, OCX_MESSAGE_SIZE, "%s: OCX_COM_INT: %016llx",
  937. ocx->edac_dev->ctl_name, ctx->reg_com_int);
  938. decode_register(other, OCX_OTHER_SIZE,
  939. ocx_com_errors, ctx->reg_com_int);
  940. strncat(msg, other, OCX_MESSAGE_SIZE);
  941. for (lane = 0; lane < OCX_RX_LANES; lane++)
  942. if (ctx->reg_com_int & BIT(lane)) {
  943. snprintf(other, OCX_OTHER_SIZE,
  944. "\n\tOCX_LNE_INT[%02d]: %016llx OCX_LNE_STAT11[%02d]: %016llx",
  945. lane, ctx->reg_lane_int[lane],
  946. lane, ctx->reg_lane_stat11[lane]);
  947. strncat(msg, other, OCX_MESSAGE_SIZE);
  948. decode_register(other, OCX_OTHER_SIZE,
  949. ocx_lane_errors,
  950. ctx->reg_lane_int[lane]);
  951. strncat(msg, other, OCX_MESSAGE_SIZE);
  952. }
  953. if (ctx->reg_com_int & OCX_COM_INT_CE)
  954. edac_device_handle_ce(ocx->edac_dev, 0, 0, msg);
  955. ocx->com_ring_tail++;
  956. }
  957. ret = IRQ_HANDLED;
  958. err_free:
  959. kfree(other);
  960. kfree(msg);
  961. return ret;
  962. }
  963. static irqreturn_t thunderx_ocx_lnk_isr(int irq, void *irq_id)
  964. {
  965. struct msix_entry *msix = irq_id;
  966. struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
  967. msix_ent[msix->entry]);
  968. unsigned long head = ring_pos(ocx->link_ring_head,
  969. ARRAY_SIZE(ocx->link_err_ctx));
  970. struct ocx_link_err_ctx *ctx = &ocx->link_err_ctx[head];
  971. ctx->link = msix->entry;
  972. ctx->reg_com_link_int = readq(ocx->regs + OCX_COM_LINKX_INT(ctx->link));
  973. writeq(ctx->reg_com_link_int, ocx->regs + OCX_COM_LINKX_INT(ctx->link));
  974. ocx->link_ring_head++;
  975. return IRQ_WAKE_THREAD;
  976. }
  977. static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
  978. {
  979. struct msix_entry *msix = irq_id;
  980. struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
  981. msix_ent[msix->entry]);
  982. irqreturn_t ret = IRQ_NONE;
  983. unsigned long tail;
  984. struct ocx_link_err_ctx *ctx;
  985. char *msg;
  986. char *other;
  987. msg = kmalloc(OCX_MESSAGE_SIZE, GFP_KERNEL);
  988. other = kmalloc(OCX_OTHER_SIZE, GFP_KERNEL);
  989. if (!msg || !other)
  990. goto err_free;
  991. while (CIRC_CNT(ocx->link_ring_head, ocx->link_ring_tail,
  992. ARRAY_SIZE(ocx->link_err_ctx))) {
  993. tail = ring_pos(ocx->link_ring_head,
  994. ARRAY_SIZE(ocx->link_err_ctx));
  995. ctx = &ocx->link_err_ctx[tail];
  996. snprintf(msg, OCX_MESSAGE_SIZE,
  997. "%s: OCX_COM_LINK_INT[%d]: %016llx",
  998. ocx->edac_dev->ctl_name,
  999. ctx->link, ctx->reg_com_link_int);
  1000. decode_register(other, OCX_OTHER_SIZE,
  1001. ocx_com_link_errors, ctx->reg_com_link_int);
  1002. strncat(msg, other, OCX_MESSAGE_SIZE);
  1003. if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
  1004. edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
  1005. else if (ctx->reg_com_link_int & OCX_COM_LINK_INT_CE)
  1006. edac_device_handle_ce(ocx->edac_dev, 0, 0, msg);
  1007. ocx->link_ring_tail++;
  1008. }
  1009. ret = IRQ_HANDLED;
  1010. err_free:
  1011. kfree(other);
  1012. kfree(msg);
  1013. return ret;
  1014. }
/* One debugfs file per interesting OCX register (CONFIG_EDAC_DEBUG only). */
#define OCX_DEBUGFS_ATTR(_name, _reg) DEBUGFS_REG_ATTR(ocx, _name, _reg)

OCX_DEBUGFS_ATTR(tlk0_ecc_ctl, OCX_TLKX_ECC_CTL(0));
OCX_DEBUGFS_ATTR(tlk1_ecc_ctl, OCX_TLKX_ECC_CTL(1));
OCX_DEBUGFS_ATTR(tlk2_ecc_ctl, OCX_TLKX_ECC_CTL(2));
OCX_DEBUGFS_ATTR(rlk0_ecc_ctl, OCX_RLKX_ECC_CTL(0));
OCX_DEBUGFS_ATTR(rlk1_ecc_ctl, OCX_RLKX_ECC_CTL(1));
OCX_DEBUGFS_ATTR(rlk2_ecc_ctl, OCX_RLKX_ECC_CTL(2));
/* The *_int attributes target the W1S registers (error injection). */
OCX_DEBUGFS_ATTR(com_link0_int, OCX_COM_LINKX_INT_W1S(0));
OCX_DEBUGFS_ATTR(com_link1_int, OCX_COM_LINKX_INT_W1S(1));
OCX_DEBUGFS_ATTR(com_link2_int, OCX_COM_LINKX_INT_W1S(2));
OCX_DEBUGFS_ATTR(lne00_badcnt, OCX_LNE_BAD_CNT(0));
OCX_DEBUGFS_ATTR(lne01_badcnt, OCX_LNE_BAD_CNT(1));
OCX_DEBUGFS_ATTR(lne02_badcnt, OCX_LNE_BAD_CNT(2));
OCX_DEBUGFS_ATTR(lne03_badcnt, OCX_LNE_BAD_CNT(3));
OCX_DEBUGFS_ATTR(lne04_badcnt, OCX_LNE_BAD_CNT(4));
OCX_DEBUGFS_ATTR(lne05_badcnt, OCX_LNE_BAD_CNT(5));
OCX_DEBUGFS_ATTR(lne06_badcnt, OCX_LNE_BAD_CNT(6));
OCX_DEBUGFS_ATTR(lne07_badcnt, OCX_LNE_BAD_CNT(7));
OCX_DEBUGFS_ATTR(lne08_badcnt, OCX_LNE_BAD_CNT(8));
OCX_DEBUGFS_ATTR(lne09_badcnt, OCX_LNE_BAD_CNT(9));
OCX_DEBUGFS_ATTR(lne10_badcnt, OCX_LNE_BAD_CNT(10));
OCX_DEBUGFS_ATTR(lne11_badcnt, OCX_LNE_BAD_CNT(11));
OCX_DEBUGFS_ATTR(lne12_badcnt, OCX_LNE_BAD_CNT(12));
OCX_DEBUGFS_ATTR(lne13_badcnt, OCX_LNE_BAD_CNT(13));
OCX_DEBUGFS_ATTR(lne14_badcnt, OCX_LNE_BAD_CNT(14));
OCX_DEBUGFS_ATTR(lne15_badcnt, OCX_LNE_BAD_CNT(15));
OCX_DEBUGFS_ATTR(lne16_badcnt, OCX_LNE_BAD_CNT(16));
OCX_DEBUGFS_ATTR(lne17_badcnt, OCX_LNE_BAD_CNT(17));
OCX_DEBUGFS_ATTR(lne18_badcnt, OCX_LNE_BAD_CNT(18));
OCX_DEBUGFS_ATTR(lne19_badcnt, OCX_LNE_BAD_CNT(19));
OCX_DEBUGFS_ATTR(lne20_badcnt, OCX_LNE_BAD_CNT(20));
OCX_DEBUGFS_ATTR(lne21_badcnt, OCX_LNE_BAD_CNT(21));
OCX_DEBUGFS_ATTR(lne22_badcnt, OCX_LNE_BAD_CNT(22));
OCX_DEBUGFS_ATTR(lne23_badcnt, OCX_LNE_BAD_CNT(23));
OCX_DEBUGFS_ATTR(com_int, OCX_COM_INT_W1S);

/* All OCX debugfs nodes, registered by thunderx_ocx_probe(). */
struct debugfs_entry *ocx_dfs_ents[] = {
	&debugfs_tlk0_ecc_ctl,
	&debugfs_tlk1_ecc_ctl,
	&debugfs_tlk2_ecc_ctl,
	&debugfs_rlk0_ecc_ctl,
	&debugfs_rlk1_ecc_ctl,
	&debugfs_rlk2_ecc_ctl,
	&debugfs_com_link0_int,
	&debugfs_com_link1_int,
	&debugfs_com_link2_int,
	&debugfs_lne00_badcnt,
	&debugfs_lne01_badcnt,
	&debugfs_lne02_badcnt,
	&debugfs_lne03_badcnt,
	&debugfs_lne04_badcnt,
	&debugfs_lne05_badcnt,
	&debugfs_lne06_badcnt,
	&debugfs_lne07_badcnt,
	&debugfs_lne08_badcnt,
	&debugfs_lne09_badcnt,
	&debugfs_lne10_badcnt,
	&debugfs_lne11_badcnt,
	&debugfs_lne12_badcnt,
	&debugfs_lne13_badcnt,
	&debugfs_lne14_badcnt,
	&debugfs_lne15_badcnt,
	&debugfs_lne16_badcnt,
	&debugfs_lne17_badcnt,
	&debugfs_lne18_badcnt,
	&debugfs_lne19_badcnt,
	&debugfs_lne20_badcnt,
	&debugfs_lne21_badcnt,
	&debugfs_lne22_badcnt,
	&debugfs_lne23_badcnt,
	&debugfs_com_int,
};

/* PCI IDs matched by the OCX EDAC driver. */
static const struct pci_device_id thunderx_ocx_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_OCX) },
	{ 0, },
};
  1090. static void thunderx_ocx_clearstats(struct thunderx_ocx *ocx)
  1091. {
  1092. int lane, stat, cfg;
  1093. for (lane = 0; lane < OCX_RX_LANES; lane++) {
  1094. cfg = readq(ocx->regs + OCX_LNE_CFG(lane));
  1095. cfg |= OCX_LNE_CFG_RX_STAT_RDCLR;
  1096. cfg &= ~OCX_LNE_CFG_RX_STAT_ENA;
  1097. writeq(cfg, ocx->regs + OCX_LNE_CFG(lane));
  1098. for (stat = 0; stat < OCX_RX_LANE_STATS; stat++)
  1099. readq(ocx->regs + OCX_LNE_STAT(lane, stat));
  1100. }
  1101. }
  1102. static int thunderx_ocx_probe(struct pci_dev *pdev,
  1103. const struct pci_device_id *id)
  1104. {
  1105. struct thunderx_ocx *ocx;
  1106. struct edac_device_ctl_info *edac_dev;
  1107. char name[32];
  1108. int idx;
  1109. int i;
  1110. int ret;
  1111. u64 reg;
  1112. ret = pcim_enable_device(pdev);
  1113. if (ret) {
  1114. dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
  1115. return ret;
  1116. }
  1117. ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_ocx");
  1118. if (ret) {
  1119. dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
  1120. return ret;
  1121. }
  1122. idx = edac_device_alloc_index();
  1123. snprintf(name, sizeof(name), "OCX%d", idx);
  1124. edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_ocx),
  1125. name, 1, "CCPI", 1,
  1126. 0, NULL, 0, idx);
  1127. if (!edac_dev) {
  1128. dev_err(&pdev->dev, "Cannot allocate EDAC device: %d\n", ret);
  1129. return -ENOMEM;
  1130. }
  1131. ocx = edac_dev->pvt_info;
  1132. ocx->edac_dev = edac_dev;
  1133. ocx->com_ring_head = 0;
  1134. ocx->com_ring_tail = 0;
  1135. ocx->link_ring_head = 0;
  1136. ocx->link_ring_tail = 0;
  1137. ocx->regs = pcim_iomap_table(pdev)[0];
  1138. if (!ocx->regs) {
  1139. dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
  1140. ret = -ENODEV;
  1141. goto err_free;
  1142. }
  1143. ocx->pdev = pdev;
  1144. for (i = 0; i < OCX_INTS; i++) {
  1145. ocx->msix_ent[i].entry = i;
  1146. ocx->msix_ent[i].vector = 0;
  1147. }
  1148. ret = pci_enable_msix_exact(pdev, ocx->msix_ent, OCX_INTS);
  1149. if (ret) {
  1150. dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
  1151. goto err_free;
  1152. }
  1153. for (i = 0; i < OCX_INTS; i++) {
  1154. ret = devm_request_threaded_irq(&pdev->dev,
  1155. ocx->msix_ent[i].vector,
  1156. (i == 3) ?
  1157. thunderx_ocx_com_isr :
  1158. thunderx_ocx_lnk_isr,
  1159. (i == 3) ?
  1160. thunderx_ocx_com_threaded_isr :
  1161. thunderx_ocx_lnk_threaded_isr,
  1162. 0, "[EDAC] ThunderX OCX",
  1163. &ocx->msix_ent[i]);
  1164. if (ret)
  1165. goto err_free;
  1166. }
  1167. edac_dev->dev = &pdev->dev;
  1168. edac_dev->dev_name = dev_name(&pdev->dev);
  1169. edac_dev->mod_name = "thunderx-ocx";
  1170. edac_dev->ctl_name = "thunderx-ocx";
  1171. ret = edac_device_add_device(edac_dev);
  1172. if (ret) {
  1173. dev_err(&pdev->dev, "Cannot add EDAC device: %d\n", ret);
  1174. goto err_free;
  1175. }
  1176. if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
  1177. ocx->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name);
  1178. ret = thunderx_create_debugfs_nodes(ocx->debugfs,
  1179. ocx_dfs_ents,
  1180. ocx,
  1181. ARRAY_SIZE(ocx_dfs_ents));
  1182. if (ret != ARRAY_SIZE(ocx_dfs_ents)) {
  1183. dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
  1184. ret, ret >= 0 ? " created" : "");
  1185. }
  1186. }
  1187. pci_set_drvdata(pdev, edac_dev);
  1188. thunderx_ocx_clearstats(ocx);
  1189. for (i = 0; i < OCX_RX_LANES; i++) {
  1190. writeq(OCX_LNE_INT_ENA_ALL,
  1191. ocx->regs + OCX_LNE_INT_EN(i));
  1192. reg = readq(ocx->regs + OCX_LNE_INT(i));
  1193. writeq(reg, ocx->regs + OCX_LNE_INT(i));
  1194. }
  1195. for (i = 0; i < OCX_LINK_INTS; i++) {
  1196. reg = readq(ocx->regs + OCX_COM_LINKX_INT(i));
  1197. writeq(reg, ocx->regs + OCX_COM_LINKX_INT(i));
  1198. writeq(OCX_COM_LINKX_INT_ENA_ALL,
  1199. ocx->regs + OCX_COM_LINKX_INT_ENA_W1S(i));
  1200. }
  1201. reg = readq(ocx->regs + OCX_COM_INT);
  1202. writeq(reg, ocx->regs + OCX_COM_INT);
  1203. writeq(OCX_COM_INT_ENA_ALL, ocx->regs + OCX_COM_INT_ENA_W1S);
  1204. return 0;
  1205. err_free:
  1206. edac_device_free_ctl_info(edac_dev);
  1207. return ret;
  1208. }
  1209. static void thunderx_ocx_remove(struct pci_dev *pdev)
  1210. {
  1211. struct edac_device_ctl_info *edac_dev = pci_get_drvdata(pdev);
  1212. struct thunderx_ocx *ocx = edac_dev->pvt_info;
  1213. int i;
  1214. writeq(OCX_COM_INT_ENA_ALL, ocx->regs + OCX_COM_INT_ENA_W1C);
  1215. for (i = 0; i < OCX_INTS; i++) {
  1216. writeq(OCX_COM_LINKX_INT_ENA_ALL,
  1217. ocx->regs + OCX_COM_LINKX_INT_ENA_W1C(i));
  1218. }
  1219. edac_debugfs_remove_recursive(ocx->debugfs);
  1220. edac_device_del_device(&pdev->dev);
  1221. edac_device_free_ctl_info(edac_dev);
  1222. }
/* Export the OCX PCI ID list so the module autoloads on matching hardware. */
MODULE_DEVICE_TABLE(pci, thunderx_ocx_pci_tbl);

/* PCI driver glue for the OCX (CCPI interconnect) EDAC instance. */
static struct pci_driver thunderx_ocx_driver = {
	.name = "thunderx_ocx_edac",
	.probe = thunderx_ocx_probe,
	.remove = thunderx_ocx_remove,
	.id_table = thunderx_ocx_pci_tbl,
};
/*---------------------- L2C driver ---------------------------------*/

/* PCI device IDs of the three L2C sub-blocks handled by this driver. */
#define PCI_DEVICE_ID_THUNDER_L2C_TAD 0xa02e
#define PCI_DEVICE_ID_THUNDER_L2C_CBC 0xa02f
#define PCI_DEVICE_ID_THUNDER_L2C_MCI 0xa030

/* L2C-TAD interrupt register offsets. */
#define L2C_TAD_INT_W1C 0x40000
#define L2C_TAD_INT_W1S 0x40008
#define L2C_TAD_INT_ENA_W1C 0x40020
#define L2C_TAD_INT_ENA_W1S 0x40028
/* L2C_TAD_INT cause bits. */
#define L2C_TAD_INT_L2DDBE BIT(1)
#define L2C_TAD_INT_SBFSBE BIT(2)
#define L2C_TAD_INT_SBFDBE BIT(3)
#define L2C_TAD_INT_FBFSBE BIT(4)
#define L2C_TAD_INT_FBFDBE BIT(5)
#define L2C_TAD_INT_TAGDBE BIT(9)
#define L2C_TAD_INT_RDDISLMC BIT(15)
#define L2C_TAD_INT_WRDISLMC BIT(16)
#define L2C_TAD_INT_LFBTO BIT(17)
#define L2C_TAD_INT_GSYNCTO BIT(18)
#define L2C_TAD_INT_RTGSBE BIT(32)
#define L2C_TAD_INT_RTGDBE BIT(33)
#define L2C_TAD_INT_RDDISOCI BIT(34)
#define L2C_TAD_INT_WRDISOCI BIT(35)

/* ECC causes (selects the TQD_ERR extended register in the ISR). */
#define L2C_TAD_INT_ECC (L2C_TAD_INT_L2DDBE | \
			 L2C_TAD_INT_SBFSBE | L2C_TAD_INT_SBFDBE | \
			 L2C_TAD_INT_FBFSBE | L2C_TAD_INT_FBFDBE)
/* Single-bit causes reported as corrected errors. */
#define L2C_TAD_INT_CE (L2C_TAD_INT_SBFSBE | \
			L2C_TAD_INT_FBFSBE)
/* Double-bit/fatal causes reported as uncorrected errors. */
#define L2C_TAD_INT_UE (L2C_TAD_INT_L2DDBE | \
			L2C_TAD_INT_SBFDBE | \
			L2C_TAD_INT_FBFDBE | \
			L2C_TAD_INT_TAGDBE | \
			L2C_TAD_INT_RTGDBE | \
			L2C_TAD_INT_WRDISOCI | \
			L2C_TAD_INT_RDDISOCI | \
			L2C_TAD_INT_WRDISLMC | \
			L2C_TAD_INT_RDDISLMC | \
			L2C_TAD_INT_LFBTO | \
			L2C_TAD_INT_GSYNCTO)

/* Decode table for L2C_TAD_INT; terminated by a NULL descr. */
static const struct error_descr l2_tad_errors[] = {
	{
		.type = ERR_CORRECTED,
		.mask = L2C_TAD_INT_SBFSBE,
		.descr = "SBF single-bit error",
	},
	{
		.type = ERR_CORRECTED,
		.mask = L2C_TAD_INT_FBFSBE,
		.descr = "FBF single-bit error",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_TAD_INT_L2DDBE,
		.descr = "L2D double-bit error",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_TAD_INT_SBFDBE,
		.descr = "SBF double-bit error",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_TAD_INT_FBFDBE,
		.descr = "FBF double-bit error",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_TAD_INT_TAGDBE,
		.descr = "TAG double-bit error",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_TAD_INT_RTGDBE,
		.descr = "RTG double-bit error",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_TAD_INT_WRDISOCI,
		.descr = "Write to a disabled CCPI",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_TAD_INT_RDDISOCI,
		.descr = "Read from a disabled CCPI",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_TAD_INT_WRDISLMC,
		.descr = "Write to a disabled LMC",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_TAD_INT_RDDISLMC,
		.descr = "Read from a disabled LMC",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_TAD_INT_LFBTO,
		.descr = "LFB entry timeout",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_TAD_INT_GSYNCTO,
		.descr = "Global sync CCPI timeout",
	},
	{0, 0, NULL},
};
/* Cause groups used by the TAD ISR to pick the extended error register. */
#define L2C_TAD_INT_TAG (L2C_TAD_INT_TAGDBE)
#define L2C_TAD_INT_RTG (L2C_TAD_INT_RTGDBE)
#define L2C_TAD_INT_DISLMC (L2C_TAD_INT_WRDISLMC | L2C_TAD_INT_RDDISLMC)
#define L2C_TAD_INT_DISOCI (L2C_TAD_INT_WRDISOCI | L2C_TAD_INT_RDDISOCI)

/* Full enable mask written to L2C_TAD_INT_ENA_W1S at probe time. */
#define L2C_TAD_INT_ENA_ALL (L2C_TAD_INT_ECC | L2C_TAD_INT_TAG | \
			     L2C_TAD_INT_RTG | \
			     L2C_TAD_INT_DISLMC | L2C_TAD_INT_DISOCI | \
			     L2C_TAD_INT_LFBTO)

/* TAD timeout/extended-error register offsets. */
#define L2C_TAD_TIMETWO 0x50000
#define L2C_TAD_TIMEOUT 0x50100
#define L2C_TAD_ERR 0x60000
#define L2C_TAD_TQD_ERR 0x60100
#define L2C_TAD_TTG_ERR 0x60200

/* L2C-CBC interrupt registers and cause bits. */
#define L2C_CBC_INT_W1C 0x60000
#define L2C_CBC_INT_RSDSBE BIT(0)
#define L2C_CBC_INT_RSDDBE BIT(1)
#define L2C_CBC_INT_RSD (L2C_CBC_INT_RSDSBE | L2C_CBC_INT_RSDDBE)
#define L2C_CBC_INT_MIBSBE BIT(4)
#define L2C_CBC_INT_MIBDBE BIT(5)
#define L2C_CBC_INT_MIB (L2C_CBC_INT_MIBSBE | L2C_CBC_INT_MIBDBE)
#define L2C_CBC_INT_IORDDISOCI BIT(6)
#define L2C_CBC_INT_IOWRDISOCI BIT(7)
#define L2C_CBC_INT_IODISOCI (L2C_CBC_INT_IORDDISOCI | \
			      L2C_CBC_INT_IOWRDISOCI)
/* CBC corrected vs uncorrected cause masks. */
#define L2C_CBC_INT_CE (L2C_CBC_INT_RSDSBE | L2C_CBC_INT_MIBSBE)
#define L2C_CBC_INT_UE (L2C_CBC_INT_RSDDBE | L2C_CBC_INT_MIBDBE)

/* Decode table for L2C_CBC_INT; terminated by a NULL descr. */
static const struct error_descr l2_cbc_errors[] = {
	{
		.type = ERR_CORRECTED,
		.mask = L2C_CBC_INT_RSDSBE,
		.descr = "RSD single-bit error",
	},
	{
		.type = ERR_CORRECTED,
		.mask = L2C_CBC_INT_MIBSBE,
		.descr = "MIB single-bit error",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_CBC_INT_RSDDBE,
		.descr = "RSD double-bit error",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_CBC_INT_MIBDBE,
		.descr = "MIB double-bit error",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_CBC_INT_IORDDISOCI,
		.descr = "Read from a disabled CCPI",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_CBC_INT_IOWRDISOCI,
		.descr = "Write to a disabled CCPI",
	},
	{0, 0, NULL},
};

/* Remaining CBC register offsets and enable mask. */
#define L2C_CBC_INT_W1S 0x60008
#define L2C_CBC_INT_ENA_W1C 0x60020
#define L2C_CBC_INT_ENA_ALL (L2C_CBC_INT_RSD | L2C_CBC_INT_MIB | \
			     L2C_CBC_INT_IODISOCI)
#define L2C_CBC_INT_ENA_W1S 0x60028
#define L2C_CBC_IODISOCIERR 0x80008
#define L2C_CBC_IOCERR 0x80010
#define L2C_CBC_RSDERR 0x80018
#define L2C_CBC_MIBERR 0x80020

/* L2C-MCI interrupt registers and cause bits. */
#define L2C_MCI_INT_W1C 0x0
#define L2C_MCI_INT_VBFSBE BIT(0)
#define L2C_MCI_INT_VBFDBE BIT(1)

/* Decode table for L2C_MCI_INT; terminated by a NULL descr. */
static const struct error_descr l2_mci_errors[] = {
	{
		.type = ERR_CORRECTED,
		.mask = L2C_MCI_INT_VBFSBE,
		.descr = "VBF single-bit error",
	},
	{
		.type = ERR_UNCORRECTED,
		.mask = L2C_MCI_INT_VBFDBE,
		.descr = "VBF double-bit error",
	},
	{0, 0, NULL},
};

/* Remaining MCI register offsets and enable mask. */
#define L2C_MCI_INT_W1S 0x8
#define L2C_MCI_INT_ENA_W1C 0x20
#define L2C_MCI_INT_ENA_ALL (L2C_MCI_INT_VBFSBE | L2C_MCI_INT_VBFDBE)
#define L2C_MCI_INT_ENA_W1S 0x28
#define L2C_MCI_ERR 0x10000

/* Scratch buffer sizes used when formatting L2C error messages. */
#define L2C_MESSAGE_SIZE SZ_1K
#define L2C_OTHER_SIZE (50 * ARRAY_SIZE(l2_tad_errors))

/* Snapshot of one L2C interrupt, captured in hard-IRQ context. */
struct l2c_err_ctx {
	char *reg_ext_name;	/* name of the extended register latched */
	u64 reg_int;		/* the *_INT_W1C value at IRQ time */
	u64 reg_ext;		/* matching extended error register value */
};

/* Per-device state shared by the TAD, CBC and MCI sub-block drivers. */
struct thunderx_l2c {
	void __iomem *regs;	/* BAR0 mapping */
	struct pci_dev *pdev;
	struct edac_device_ctl_info *edac_dev;
	struct dentry *debugfs;
	int index;
	struct msix_entry msix_ent;
	/* SPSC ring: hard ISR advances ring_head, threaded ISR ring_tail. */
	struct l2c_err_ctx err_ctx[RING_ENTRIES];
	unsigned long ring_head;
	unsigned long ring_tail;
};
  1443. static irqreturn_t thunderx_l2c_tad_isr(int irq, void *irq_id)
  1444. {
  1445. struct msix_entry *msix = irq_id;
  1446. struct thunderx_l2c *tad = container_of(msix, struct thunderx_l2c,
  1447. msix_ent);
  1448. unsigned long head = ring_pos(tad->ring_head, ARRAY_SIZE(tad->err_ctx));
  1449. struct l2c_err_ctx *ctx = &tad->err_ctx[head];
  1450. ctx->reg_int = readq(tad->regs + L2C_TAD_INT_W1C);
  1451. if (ctx->reg_int & L2C_TAD_INT_ECC) {
  1452. ctx->reg_ext_name = "TQD_ERR";
  1453. ctx->reg_ext = readq(tad->regs + L2C_TAD_TQD_ERR);
  1454. } else if (ctx->reg_int & L2C_TAD_INT_TAG) {
  1455. ctx->reg_ext_name = "TTG_ERR";
  1456. ctx->reg_ext = readq(tad->regs + L2C_TAD_TTG_ERR);
  1457. } else if (ctx->reg_int & L2C_TAD_INT_LFBTO) {
  1458. ctx->reg_ext_name = "TIMEOUT";
  1459. ctx->reg_ext = readq(tad->regs + L2C_TAD_TIMEOUT);
  1460. } else if (ctx->reg_int & L2C_TAD_INT_DISOCI) {
  1461. ctx->reg_ext_name = "ERR";
  1462. ctx->reg_ext = readq(tad->regs + L2C_TAD_ERR);
  1463. }
  1464. writeq(ctx->reg_int, tad->regs + L2C_TAD_INT_W1C);
  1465. tad->ring_head++;
  1466. return IRQ_WAKE_THREAD;
  1467. }
  1468. static irqreturn_t thunderx_l2c_cbc_isr(int irq, void *irq_id)
  1469. {
  1470. struct msix_entry *msix = irq_id;
  1471. struct thunderx_l2c *cbc = container_of(msix, struct thunderx_l2c,
  1472. msix_ent);
  1473. unsigned long head = ring_pos(cbc->ring_head, ARRAY_SIZE(cbc->err_ctx));
  1474. struct l2c_err_ctx *ctx = &cbc->err_ctx[head];
  1475. ctx->reg_int = readq(cbc->regs + L2C_CBC_INT_W1C);
  1476. if (ctx->reg_int & L2C_CBC_INT_RSD) {
  1477. ctx->reg_ext_name = "RSDERR";
  1478. ctx->reg_ext = readq(cbc->regs + L2C_CBC_RSDERR);
  1479. } else if (ctx->reg_int & L2C_CBC_INT_MIB) {
  1480. ctx->reg_ext_name = "MIBERR";
  1481. ctx->reg_ext = readq(cbc->regs + L2C_CBC_MIBERR);
  1482. } else if (ctx->reg_int & L2C_CBC_INT_IODISOCI) {
  1483. ctx->reg_ext_name = "IODISOCIERR";
  1484. ctx->reg_ext = readq(cbc->regs + L2C_CBC_IODISOCIERR);
  1485. }
  1486. writeq(ctx->reg_int, cbc->regs + L2C_CBC_INT_W1C);
  1487. cbc->ring_head++;
  1488. return IRQ_WAKE_THREAD;
  1489. }
  1490. static irqreturn_t thunderx_l2c_mci_isr(int irq, void *irq_id)
  1491. {
  1492. struct msix_entry *msix = irq_id;
  1493. struct thunderx_l2c *mci = container_of(msix, struct thunderx_l2c,
  1494. msix_ent);
  1495. unsigned long head = ring_pos(mci->ring_head, ARRAY_SIZE(mci->err_ctx));
  1496. struct l2c_err_ctx *ctx = &mci->err_ctx[head];
  1497. ctx->reg_int = readq(mci->regs + L2C_MCI_INT_W1C);
  1498. ctx->reg_ext = readq(mci->regs + L2C_MCI_ERR);
  1499. writeq(ctx->reg_int, mci->regs + L2C_MCI_INT_W1C);
  1500. ctx->reg_ext_name = "ERR";
  1501. mci->ring_head++;
  1502. return IRQ_WAKE_THREAD;
  1503. }
  1504. static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
  1505. {
  1506. struct msix_entry *msix = irq_id;
  1507. struct thunderx_l2c *l2c = container_of(msix, struct thunderx_l2c,
  1508. msix_ent);
  1509. unsigned long tail = ring_pos(l2c->ring_tail, ARRAY_SIZE(l2c->err_ctx));
  1510. struct l2c_err_ctx *ctx = &l2c->err_ctx[tail];
  1511. irqreturn_t ret = IRQ_NONE;
  1512. u64 mask_ue, mask_ce;
  1513. const struct error_descr *l2_errors;
  1514. char *reg_int_name;
  1515. char *msg;
  1516. char *other;
  1517. msg = kmalloc(OCX_MESSAGE_SIZE, GFP_KERNEL);
  1518. other = kmalloc(OCX_OTHER_SIZE, GFP_KERNEL);
  1519. if (!msg || !other)
  1520. goto err_free;
  1521. switch (l2c->pdev->device) {
  1522. case PCI_DEVICE_ID_THUNDER_L2C_TAD:
  1523. reg_int_name = "L2C_TAD_INT";
  1524. mask_ue = L2C_TAD_INT_UE;
  1525. mask_ce = L2C_TAD_INT_CE;
  1526. l2_errors = l2_tad_errors;
  1527. break;
  1528. case PCI_DEVICE_ID_THUNDER_L2C_CBC:
  1529. reg_int_name = "L2C_CBC_INT";
  1530. mask_ue = L2C_CBC_INT_UE;
  1531. mask_ce = L2C_CBC_INT_CE;
  1532. l2_errors = l2_cbc_errors;
  1533. break;
  1534. case PCI_DEVICE_ID_THUNDER_L2C_MCI:
  1535. reg_int_name = "L2C_MCI_INT";
  1536. mask_ue = L2C_MCI_INT_VBFDBE;
  1537. mask_ce = L2C_MCI_INT_VBFSBE;
  1538. l2_errors = l2_mci_errors;
  1539. break;
  1540. default:
  1541. dev_err(&l2c->pdev->dev, "Unsupported device: %04x\n",
  1542. l2c->pdev->device);
  1543. goto err_free;
  1544. }
  1545. while (CIRC_CNT(l2c->ring_head, l2c->ring_tail,
  1546. ARRAY_SIZE(l2c->err_ctx))) {
  1547. snprintf(msg, L2C_MESSAGE_SIZE,
  1548. "%s: %s: %016llx, %s: %016llx",
  1549. l2c->edac_dev->ctl_name, reg_int_name, ctx->reg_int,
  1550. ctx->reg_ext_name, ctx->reg_ext);
  1551. decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);
  1552. strncat(msg, other, L2C_MESSAGE_SIZE);
  1553. if (ctx->reg_int & mask_ue)
  1554. edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
  1555. else if (ctx->reg_int & mask_ce)
  1556. edac_device_handle_ce(l2c->edac_dev, 0, 0, msg);
  1557. l2c->ring_tail++;
  1558. }
  1559. ret = IRQ_HANDLED;
  1560. err_free:
  1561. kfree(other);
  1562. kfree(msg);
  1563. return ret;
  1564. }
/*
 * Debugfs attributes for the three L2C sub-blocks (TAD, CBC, MCI).
 * Each attribute wraps the corresponding *_INT_W1S register; NOTE(review):
 * given the W1S ("write one to set") suffix, these presumably let user
 * space latch interrupt bits for error-injection testing — confirm against
 * the DEBUGFS_REG_ATTR definition earlier in this file.
 */
#define L2C_DEBUGFS_ATTR(_name, _reg) DEBUGFS_REG_ATTR(l2c, _name, _reg)

L2C_DEBUGFS_ATTR(tad_int, L2C_TAD_INT_W1S);

/* Debugfs entries exposed for an L2C-TAD (tag-and-data) device. */
struct debugfs_entry *l2c_tad_dfs_ents[] = {
	&debugfs_tad_int,
};

L2C_DEBUGFS_ATTR(cbc_int, L2C_CBC_INT_W1S);

/* Debugfs entries exposed for an L2C-CBC device. */
struct debugfs_entry *l2c_cbc_dfs_ents[] = {
	&debugfs_cbc_int,
};

L2C_DEBUGFS_ATTR(mci_int, L2C_MCI_INT_W1S);

/* Debugfs entries exposed for an L2C-MCI device. */
struct debugfs_entry *l2c_mci_dfs_ents[] = {
	&debugfs_mci_int,
};
/*
 * PCI IDs handled by this driver: one table entry per L2C sub-block
 * (TAD, CBC, MCI), all with the Cavium vendor ID.  thunderx_l2c_probe()
 * switches on pdev->device to pick the per-block configuration.
 */
static const struct pci_device_id thunderx_l2c_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_TAD), },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_CBC), },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_MCI), },
	{ 0, },
};
/*
 * thunderx_l2c_probe() - bind to one L2C sub-block PCI function.
 *
 * Sets up one EDAC device for an L2C TAD, CBC or MCI function:
 * maps BAR0, selects the per-block ISR / debugfs table / name format /
 * interrupt-enable register, allocates the EDAC control structure with a
 * struct thunderx_l2c as private data, wires up the MSI-X interrupt, and
 * registers with the EDAC core.  Error interrupts are enabled only as the
 * very last step, once everything that the ISR touches is initialized.
 *
 * Returns 0 on success or a negative errno.
 */
static int thunderx_l2c_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct thunderx_l2c *l2c;
	struct edac_device_ctl_info *edac_dev;
	struct debugfs_entry **l2c_devattr;
	size_t dfs_entries;
	irqreturn_t (*thunderx_l2c_isr)(int, void *) = NULL;
	char name[32];
	const char *fmt;
	u64 reg_en_offs, reg_en_mask;
	int idx;
	int ret;

	/*
	 * pcim_enable_device() makes this a managed PCI device, so the
	 * iomap below (and later PCI resources) are released automatically
	 * on driver detach or probe failure.
	 */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
		return ret;
	}

	/* All L2C registers live in BAR0. */
	ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_l2c");
	if (ret) {
		dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
		return ret;
	}

	/*
	 * Per-sub-block configuration: hard ISR, debugfs attribute table,
	 * EDAC device name format, and the W1S interrupt-enable register
	 * plus the mask of all enable bits for that block.
	 */
	switch (pdev->device) {
	case PCI_DEVICE_ID_THUNDER_L2C_TAD:
		thunderx_l2c_isr = thunderx_l2c_tad_isr;
		l2c_devattr = l2c_tad_dfs_ents;
		dfs_entries = ARRAY_SIZE(l2c_tad_dfs_ents);
		fmt = "L2C-TAD%d";
		reg_en_offs = L2C_TAD_INT_ENA_W1S;
		reg_en_mask = L2C_TAD_INT_ENA_ALL;
		break;
	case PCI_DEVICE_ID_THUNDER_L2C_CBC:
		thunderx_l2c_isr = thunderx_l2c_cbc_isr;
		l2c_devattr = l2c_cbc_dfs_ents;
		dfs_entries = ARRAY_SIZE(l2c_cbc_dfs_ents);
		fmt = "L2C-CBC%d";
		reg_en_offs = L2C_CBC_INT_ENA_W1S;
		reg_en_mask = L2C_CBC_INT_ENA_ALL;
		break;
	case PCI_DEVICE_ID_THUNDER_L2C_MCI:
		thunderx_l2c_isr = thunderx_l2c_mci_isr;
		l2c_devattr = l2c_mci_dfs_ents;
		dfs_entries = ARRAY_SIZE(l2c_mci_dfs_ents);
		fmt = "L2C-MCI%d";
		reg_en_offs = L2C_MCI_INT_ENA_W1S;
		reg_en_mask = L2C_MCI_INT_ENA_ALL;
		break;
	default:
		/* Unreachable: the id_table only matches the three IDs above. */
		dev_err(&pdev->dev, "Unsupported PCI device: %04x\n",
			pdev->device);
		return -EINVAL;
	}

	/* Unique per-instance name, e.g. "L2C-TAD0", "L2C-TAD1", ... */
	idx = edac_device_alloc_index();
	snprintf(name, sizeof(name), fmt, idx);

	/* pvt_info is carved out as a struct thunderx_l2c by the EDAC core. */
	edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_l2c),
					      name, 1, "L2C", 1, 0,
					      NULL, 0, idx);
	if (!edac_dev) {
		dev_err(&pdev->dev, "Cannot allocate EDAC device\n");
		return -ENOMEM;
	}

	l2c = edac_dev->pvt_info;
	l2c->edac_dev = edac_dev;

	l2c->regs = pcim_iomap_table(pdev)[0];
	if (!l2c->regs) {
		dev_err(&pdev->dev, "Cannot map PCI resources\n");
		ret = -ENODEV;
		goto err_free;
	}

	l2c->pdev = pdev;

	/* Empty error ring; the hard ISR produces, the threaded ISR consumes. */
	l2c->ring_head = 0;
	l2c->ring_tail = 0;

	/* Single MSI-X vector; released by the managed-PCI machinery. */
	l2c->msix_ent.entry = 0;
	l2c->msix_ent.vector = 0;

	ret = pci_enable_msix_exact(pdev, &l2c->msix_ent, 1);
	if (ret) {
		dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
		goto err_free;
	}

	/*
	 * The msix_entry is the cookie handed to both handlers; they
	 * container_of() their way back to the thunderx_l2c.
	 */
	ret = devm_request_threaded_irq(&pdev->dev, l2c->msix_ent.vector,
					thunderx_l2c_isr,
					thunderx_l2c_threaded_isr,
					0, "[EDAC] ThunderX L2C",
					&l2c->msix_ent);
	if (ret)
		goto err_free;

	edac_dev->dev = &pdev->dev;
	edac_dev->dev_name = dev_name(&pdev->dev);
	edac_dev->mod_name = "thunderx-l2c";
	edac_dev->ctl_name = "thunderx-l2c";

	ret = edac_device_add_device(edac_dev);
	if (ret) {
		dev_err(&pdev->dev, "Cannot add EDAC device: %d\n", ret);
		goto err_free;
	}

	if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
		l2c->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name);

		/* Returns the number of nodes created; partial creation warns. */
		ret = thunderx_create_debugfs_nodes(l2c->debugfs, l2c_devattr,
						    l2c, dfs_entries);

		if (ret != dfs_entries) {
			dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
				 ret, ret >= 0 ? " created" : "");
		}
	}

	pci_set_drvdata(pdev, edac_dev);

	/* Last step: unmask the block's error interrupts (W1S enable). */
	writeq(reg_en_mask, l2c->regs + reg_en_offs);

	return 0;

err_free:
	/* devm/pcim resources (iomap, IRQ, MSI-X) are released automatically. */
	edac_device_free_ctl_info(edac_dev);

	return ret;
}
  1697. static void thunderx_l2c_remove(struct pci_dev *pdev)
  1698. {
  1699. struct edac_device_ctl_info *edac_dev = pci_get_drvdata(pdev);
  1700. struct thunderx_l2c *l2c = edac_dev->pvt_info;
  1701. switch (pdev->device) {
  1702. case PCI_DEVICE_ID_THUNDER_L2C_TAD:
  1703. writeq(L2C_TAD_INT_ENA_ALL, l2c->regs + L2C_TAD_INT_ENA_W1C);
  1704. break;
  1705. case PCI_DEVICE_ID_THUNDER_L2C_CBC:
  1706. writeq(L2C_CBC_INT_ENA_ALL, l2c->regs + L2C_CBC_INT_ENA_W1C);
  1707. break;
  1708. case PCI_DEVICE_ID_THUNDER_L2C_MCI:
  1709. writeq(L2C_MCI_INT_ENA_ALL, l2c->regs + L2C_MCI_INT_ENA_W1C);
  1710. break;
  1711. }
  1712. edac_debugfs_remove_recursive(l2c->debugfs);
  1713. edac_device_del_device(&pdev->dev);
  1714. edac_device_free_ctl_info(edac_dev);
  1715. }
/* Export the ID table so userspace/modprobe can autoload on device match. */
MODULE_DEVICE_TABLE(pci, thunderx_l2c_pci_tbl);

/* PCI driver covering all three L2C sub-block functions (TAD/CBC/MCI). */
static struct pci_driver thunderx_l2c_driver = {
	.name     = "thunderx_l2c_edac",
	.probe    = thunderx_l2c_probe,
	.remove   = thunderx_l2c_remove,
	.id_table = thunderx_l2c_pci_tbl,
};
  1723. static int __init thunderx_edac_init(void)
  1724. {
  1725. int rc = 0;
  1726. rc = pci_register_driver(&thunderx_lmc_driver);
  1727. if (rc)
  1728. return rc;
  1729. rc = pci_register_driver(&thunderx_ocx_driver);
  1730. if (rc)
  1731. goto err_lmc;
  1732. rc = pci_register_driver(&thunderx_l2c_driver);
  1733. if (rc)
  1734. goto err_ocx;
  1735. return rc;
  1736. err_ocx:
  1737. pci_unregister_driver(&thunderx_ocx_driver);
  1738. err_lmc:
  1739. pci_unregister_driver(&thunderx_lmc_driver);
  1740. return rc;
  1741. }
/*
 * thunderx_edac_exit() - module exit point.
 *
 * Unregisters the three PCI drivers in the reverse of the order in which
 * thunderx_edac_init() registered them.
 */
static void __exit thunderx_edac_exit(void)
{
	pci_unregister_driver(&thunderx_l2c_driver);
	pci_unregister_driver(&thunderx_ocx_driver);
	pci_unregister_driver(&thunderx_lmc_driver);
}

module_init(thunderx_edac_init);
module_exit(thunderx_edac_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Cavium, Inc.");
MODULE_DESCRIPTION("EDAC Driver for Cavium ThunderX");