mv64x60_edac.c

/*
 * Marvell MV64x60 Memory Controller kernel module for PPC platforms
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/edac.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "edac_core.h"
#include "edac_module.h"
#include "mv64x60_edac.h"

static const char *mv64x60_ctl_name = "MV64x60";
static int edac_dev_idx;
static int edac_pci_idx;
static int edac_mc_idx;

/*********************** PCI err device **********************************/
#ifdef CONFIG_PCI
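/*
 * Check for PCI interface errors: dump the error capture registers, clear
 * the latched cause bits by writing back their complement, and report a
 * parity error (PE) if any bit in MV64X60_PCI_PE_MASK is set, otherwise a
 * non-parity error (NPE).
 */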
static void mv64x60_pci_check(struct edac_pci_ctl_info *pci)
{
	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
	u32 cause;

	cause = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
	if (!cause)
		return;

	printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose);
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
	printk(KERN_ERR "Attribute: 0x%08x\n",
	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
	printk(KERN_ERR "Command: 0x%08x\n",
	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, ~cause);

	if (cause & MV64X60_PCI_PE_MASK)
		edac_pci_handle_pe(pci, pci->ctl_name);

	if (!(cause & MV64X60_PCI_PE_MASK))
		edac_pci_handle_npe(pci, pci->ctl_name);
}

static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
{
	struct edac_pci_ctl_info *pci = dev_id;
	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
	u32 val;

	val = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
	if (!val)
		return IRQ_NONE;

	mv64x60_pci_check(pci);

	return IRQ_HANDLED;
}

/*
 * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
 * errata FEr-#11 and FEr-#16 for the 64460, it should be 0 on that chip as
 * well.  IOW, don't set bit 0.
 */

/* Erratum FEr PCI-#16: clear bit 0 of PCI SERRn Mask reg. */
static int mv64x60_pci_fixup(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *pci_serr;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "PCI err regs\n", __func__);
		return -ENOENT;
	}

	pci_serr = ioremap(r->start, resource_size(r));
	if (!pci_serr)
		return -ENOMEM;

	out_le32(pci_serr, in_le32(pci_serr) & ~0x1);
	iounmap(pci_serr);

	return 0;
}

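/*
 * Probe one PCI host's error reporting unit: map the error capture
 * registers, apply the SERRn mask erratum fixup above, clear and then
 * unmask the error causes, register with the EDAC PCI core, and hook
 * mv64x60_pci_isr when running in interrupt mode.
 */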
static int mv64x60_pci_err_probe(struct platform_device *pdev)
{
	struct edac_pci_ctl_info *pci;
	struct mv64x60_pci_pdata *pdata;
	struct resource *r;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_pci_err_probe, GFP_KERNEL))
		return -ENOMEM;

	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mv64x60_pci_err");
	if (!pci)
		return -ENOMEM;

	pdata = pci->pvt_info;

	pdata->pci_hose = pdev->id;
	pdata->name = "mv64x60_pci_err";
	platform_set_drvdata(pdev, pci);
	pci->dev = &pdev->dev;
	pci->dev_name = dev_name(&pdev->dev);
	pci->mod_name = EDAC_MOD_STR;
	pci->ctl_name = pdata->name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		pci->edac_check = mv64x60_pci_check;

	pdata->edac_idx = edac_pci_idx++;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "PCI err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->pci_vbase = devm_ioremap(&pdev->dev,
					r->start,
					resource_size(r));
	if (!pdata->pci_vbase) {
		printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	res = mv64x60_pci_fixup(pdev);
	if (res < 0) {
		printk(KERN_ERR "%s: PCI fixup failed\n", __func__);
		goto err;
	}

	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0);
	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0);
	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK,
		 MV64X60_PCIx_ERR_MASK_VAL);

	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
		edac_dbg(3, "failed edac_pci_add_device()\n");
		res = -ENODEV;
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_pci_isr,
				       0,
				       "[EDAC] PCI err",
				       pci);
		if (res < 0) {
			printk(KERN_ERR "%s: Unable to request irq %d for "
			       "MV64x60 PCI ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
		       pdata->irq);
	}

	devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_pci_del_device(&pdev->dev);
err:
	edac_pci_free_ctl_info(pci);
	devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
	return res;
}

static int mv64x60_pci_err_remove(struct platform_device *pdev)
{
	struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_pci_del_device(&pdev->dev);
	edac_pci_free_ctl_info(pci);

	return 0;
}

static struct platform_driver mv64x60_pci_err_driver = {
	.probe = mv64x60_pci_err_probe,
	.remove = mv64x60_pci_err_remove,
	.driver = {
		.name = "mv64x60_pci_err",
	}
};
#endif /* CONFIG_PCI */

/*********************** SRAM err device **********************************/
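/*
 * Internal SRAM errors are always reported as uncorrected: dump the
 * address/data/parity capture registers, clear the cause register and
 * signal a UE to the EDAC device core.
 */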
static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev)
{
	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
	if (!cause)
		return;

	printk(KERN_ERR "Error in internal SRAM\n");
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
	printk(KERN_ERR "Data Low: 0x%08x\n",
	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
	printk(KERN_ERR "Data High: 0x%08x\n",
	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
	printk(KERN_ERR "Parity: 0x%08x\n",
	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
	out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);

	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}

static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
	if (!cause)
		return IRQ_NONE;

	mv64x60_sram_check(edac_dev);

	return IRQ_HANDLED;
}

static int mv64x60_sram_err_probe(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev;
	struct mv64x60_sram_pdata *pdata;
	struct resource *r;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_sram_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					      "sram", 1, NULL, 0, 0, NULL, 0,
					      edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mv64x60_sram_err";
	edac_dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, edac_dev);
	edac_dev->dev_name = dev_name(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "SRAM err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->sram_vbase = devm_ioremap(&pdev->dev,
					 r->start,
					 resource_size(r));
	if (!pdata->sram_vbase) {
		printk(KERN_ERR "%s: Unable to setup SRAM err regs\n",
		       __func__);
		res = -ENOMEM;
		goto err;
	}

	/* setup SRAM err registers */
	out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);

	edac_dev->mod_name = EDAC_MOD_STR;
	edac_dev->ctl_name = pdata->name;
	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mv64x60_sram_check;

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		edac_dbg(3, "failed edac_device_add_device()\n");
		res = -ENODEV;
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_sram_isr,
				       0,
				       "[EDAC] SRAM err",
				       edac_dev);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for "
			       "MV64x60 SRAM ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for SRAM Err\n",
		       pdata->irq);
	}

	devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_device_del_device(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}

static int mv64x60_sram_err_remove(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(edac_dev);

	return 0;
}

static struct platform_driver mv64x60_sram_err_driver = {
	.probe = mv64x60_sram_err_probe,
	.remove = mv64x60_sram_err_remove,
	.driver = {
		.name = "mv64x60_sram_err",
	}
};

/*********************** CPU err device **********************************/
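/*
 * CPU (system bus) interface errors. The capture registers are split across
 * two register blocks: cpu_vbase[0] (resource 0) holds the address capture
 * registers, cpu_vbase[1] (resource 1) holds the data, parity, cause and
 * mask registers, which is why the probe below maps two resources. Errors
 * are reported as uncorrected.
 */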
static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
{
	struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
		MV64x60_CPU_CAUSE_MASK;
	if (!cause)
		return;

	printk(KERN_ERR "Error on CPU interface\n");
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
	printk(KERN_ERR "Data Low: 0x%08x\n",
	       in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
	printk(KERN_ERR "Data High: 0x%08x\n",
	       in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
	printk(KERN_ERR "Parity: 0x%08x\n",
	       in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);

	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}

static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
		MV64x60_CPU_CAUSE_MASK;
	if (!cause)
		return IRQ_NONE;

	mv64x60_cpu_check(edac_dev);

	return IRQ_HANDLED;
}

static int mv64x60_cpu_err_probe(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev;
	struct resource *r;
	struct mv64x60_cpu_pdata *pdata;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_cpu_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					      "cpu", 1, NULL, 0, 0, NULL, 0,
					      edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mv64x60_cpu_err";
	edac_dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, edac_dev);
	edac_dev->dev_name = dev_name(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "CPU err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev,
					   r->start,
					   resource_size(r));
	if (!pdata->cpu_vbase[0]) {
		printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "CPU err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev,
					   r->start,
					   resource_size(r));
	if (!pdata->cpu_vbase[1]) {
		printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	/* setup CPU err registers */
	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0);
	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0x000000ff);

	edac_dev->mod_name = EDAC_MOD_STR;
	edac_dev->ctl_name = pdata->name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mv64x60_cpu_check;

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		edac_dbg(3, "failed edac_device_add_device()\n");
		res = -ENODEV;
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_cpu_isr,
				       0,
				       "[EDAC] CPU err",
				       edac_dev);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for MV64x60 "
			       "CPU ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR
		       " acquired irq %d for CPU Err\n", pdata->irq);
	}

	devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_device_del_device(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}

static int mv64x60_cpu_err_remove(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(edac_dev);

	return 0;
}

static struct platform_driver mv64x60_cpu_err_driver = {
	.probe = mv64x60_cpu_err_probe,
	.remove = mv64x60_cpu_err_remove,
	.driver = {
		.name = "mv64x60_cpu_err",
	}
};

/*********************** DRAM err device **********************************/
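/*
 * DRAM ECC check: bit 0 of the SDRAM error address register distinguishes a
 * single-bit (correctable) error from a double-bit (uncorrectable) one; the
 * syndrome is the XOR of the received and calculated ECC. Writing 0 back to
 * the error address register clears the error (and the interrupt).
 */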
static void mv64x60_mc_check(struct mem_ctl_info *mci)
{
	struct mv64x60_mc_pdata *pdata = mci->pvt_info;
	u32 reg;
	u32 err_addr;
	u32 sdram_ecc;
	u32 comp_ecc;
	u32 syndrome;

	reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
	if (!reg)
		return;

	err_addr = reg & ~0x3;
	sdram_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
	comp_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
	syndrome = sdram_ecc ^ comp_ecc;

	/* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
	if (!(reg & 0x1))
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & PAGE_MASK, syndrome,
				     0, 0, -1,
				     mci->ctl_name, "");
	else	/* 2 bit error, UE */
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & PAGE_MASK, 0,
				     0, 0, -1,
				     mci->ctl_name, "");

	/* clear the error */
	out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
}

static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct mv64x60_mc_pdata *pdata = mci->pvt_info;
	u32 reg;

	reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
	if (!reg)
		return IRQ_NONE;

	/* writing 0's to the ECC err addr in check function clears irq */
	mv64x60_mc_check(mci);

	return IRQ_HANDLED;
}

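/*
 * Read the total memory size from the first device-tree node of type
 * "memory". This assumes a single <base size> pair with 32-bit cells,
 * so reg[1] holds the size in bytes.
 */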
static void get_total_mem(struct mv64x60_mc_pdata *pdata)
{
	struct device_node *np = NULL;
	const unsigned int *reg;

	np = of_find_node_by_type(NULL, "memory");
	if (!np)
		return;

	reg = of_get_property(np, "reg", NULL);
	if (reg)
		pdata->total_mem = reg[1];

	of_node_put(np);
}

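/*
 * Lay out a single chip-select row with a single channel. The memory type
 * (registered vs. unbuffered DDR) and the device width are decoded from the
 * SDRAM configuration register (device width field in bits 21:20).
 */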
static void mv64x60_init_csrows(struct mem_ctl_info *mci,
				struct mv64x60_mc_pdata *pdata)
{
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	u32 devtype;
	u32 ctl;

	get_total_mem(pdata);

	ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);

	csrow = mci->csrows[0];
	dimm = csrow->channels[0]->dimm;

	dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
	dimm->grain = 8;

	dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;

	devtype = (ctl >> 20) & 0x3;
	switch (devtype) {
	case 0x0:
		dimm->dtype = DEV_X32;
		break;
	case 0x2:	/* could be X8 too, but no way to tell */
		dimm->dtype = DEV_X16;
		break;
	case 0x3:
		dimm->dtype = DEV_X4;
		break;
	default:
		dimm->dtype = DEV_UNKNOWN;
		break;
	}

	dimm->edac_mode = EDAC_SECDED;
}

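/*
 * Probe the SDRAM controller: map its registers, refuse to load when ECC is
 * not enabled in the SDRAM configuration register, lay out the single
 * csrow/channel, then clear any latched error address and program the ECC
 * control register before registering with the EDAC core (and requesting
 * the DRAM error interrupt in interrupt mode).
 */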
static int mv64x60_mc_err_probe(struct platform_device *pdev)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct mv64x60_mc_pdata *pdata;
	struct resource *r;
	u32 ctl;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
		return -ENOMEM;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 1;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
			    sizeof(struct mv64x60_mc_pdata));
	if (!mci) {
		printk(KERN_ERR "%s: No memory for MC err\n", __func__);
		devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
		return -ENOMEM;
	}

	pdata = mci->pvt_info;
	mci->pdev = &pdev->dev;
	platform_set_drvdata(pdev, mci);
	pdata->name = "mv64x60_mc_err";
	mci->dev_name = dev_name(&pdev->dev);
	pdata->edac_idx = edac_mc_idx++;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "MC err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->mc_vbase = devm_ioremap(&pdev->dev,
				       r->start,
				       resource_size(r));
	if (!pdata->mc_vbase) {
		printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
	if (!(ctl & MV64X60_SDRAM_ECC)) {
		/* Non-ECC RAM? */
		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
		res = -ENODEV;
		goto err;
	}

	edac_dbg(3, "init mci\n");
	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = MV64x60_REVISION;
	mci->ctl_name = mv64x60_ctl_name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = mv64x60_mc_check;

	mci->ctl_page_to_phys = NULL;

	mci->scrub_mode = SCRUB_SW_SRC;

	mv64x60_init_csrows(mci, pdata);

	/* setup MC registers */
	out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
	ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
	ctl = (ctl & 0xff00ffff) | 0x10000;
	out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);

	res = edac_mc_add_mc(mci);
	if (res) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		/* acquire interrupt that reports errors */
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_mc_isr,
				       0,
				       "[EDAC] MC err",
				       mci);
		if (res < 0) {
			printk(KERN_ERR "%s: Unable to request irq %d for "
			       "MV64x60 DRAM ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC Err\n",
		       pdata->irq);
	}

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_mc_del_mc(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
	edac_mc_free(mci);
	return res;
}

static int mv64x60_mc_err_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);

	return 0;
}

static struct platform_driver mv64x60_mc_err_driver = {
	.probe = mv64x60_mc_err_probe,
	.remove = mv64x60_mc_err_remove,
	.driver = {
		.name = "mv64x60_mc_err",
	}
};

static struct platform_driver * const drivers[] = {
	&mv64x60_mc_err_driver,
	&mv64x60_cpu_err_driver,
	&mv64x60_sram_err_driver,
#ifdef CONFIG_PCI
	&mv64x60_pci_err_driver,
#endif
};

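/*
 * Module init: edac_op_state is a module parameter (0=Poll, 2=Interrupt);
 * any other value falls back to interrupt mode before the platform drivers
 * above are registered.
 */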
static int __init mv64x60_edac_init(void)
{
	printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n");
	printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n");

	/* make sure error reporting method is sane */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_INT:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_INT;
		break;
	}

	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(mv64x60_edac_init);

static void __exit mv64x60_edac_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(mv64x60_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("MontaVista Software, Inc.");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
		 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");