i7300_edac.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219
  1. /*
  2. * Intel 7300 class Memory Controllers kernel module (Clarksboro)
  3. *
  4. * This file may be distributed under the terms of the
  5. * GNU General Public License version 2 only.
  6. *
  7. * Copyright (c) 2010 by:
  8. * Mauro Carvalho Chehab
  9. *
  10. * Red Hat Inc. http://www.redhat.com
  11. *
  12. * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
  13. * http://www.intel.com/Assets/PDF/datasheet/318082.pdf
  14. *
  15. * TODO: The chipset allow checking for PCI Express errors also. Currently,
 * the driver covers only memory errors
  17. *
  18. * This driver uses "csrows" EDAC attribute to represent DIMM slot#
  19. */
  20. #include <linux/module.h>
  21. #include <linux/init.h>
  22. #include <linux/pci.h>
  23. #include <linux/pci_ids.h>
  24. #include <linux/slab.h>
  25. #include <linux/edac.h>
  26. #include <linux/mmzone.h>
  27. #include "edac_core.h"
/*
 * Alter this version for the I7300 module when modifications are made
 */
#define I7300_REVISION    " Ver: 1.0.0"

/* Module name used as prefix on EDAC log messages */
#define EDAC_MOD_STR      "i7300_edac"

/* Logging helpers: tag messages with the "i7300" driver name */
#define i7300_printk(level, fmt, arg...) \
	edac_printk(level, "i7300", fmt, ##arg)

#define i7300_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg)

/***********************************************
 * i7300 Limit constants Structs and static vars
 ***********************************************/
  40. /*
  41. * Memory topology is organized as:
  42. * Branch 0 - 2 channels: channels 0 and 1 (FDB0 PCI dev 21.0)
  43. * Branch 1 - 2 channels: channels 2 and 3 (FDB1 PCI dev 22.0)
 * Each channel can have up to 8 DIMM sets (called SLOTS)
  45. * Slots should generally be filled in pairs
  46. * Except on Single Channel mode of operation
  47. * just slot 0/channel0 filled on this mode
  48. * On normal operation mode, the two channels on a branch should be
  49. * filled together for the same SLOT#
  50. * When in mirrored mode, Branch 1 replicate memory at Branch 0, so, the four
  51. * channels on both branches should be filled
  52. */
/* Limits for i7300 */
#define MAX_SLOTS		8	/* DIMM sets (slots) per channel */
#define MAX_BRANCHES		2	/* FBD branch 0 (dev 21.0) and 1 (dev 22.0) */
#define MAX_CH_PER_BRANCH	2	/* channels per branch */
#define MAX_CHANNELS		(MAX_CH_PER_BRANCH * MAX_BRANCHES)
#define MAX_MIR			3	/* Memory Interleave Registers */

/* Map (channel within branch, branch) to the global channel number (0-3) */
#define to_channel(ch, branch)	((((branch)) << 1) | (ch))

/* Map (slot, channel, branch) to the csrow index used by this driver */
#define to_csrow(slot, ch, branch) \
	(to_channel(ch, branch) | ((slot) << 2))
/* Device name and register DID (Device ID) */
struct i7300_dev_info {
	const char *ctl_name;		/* name for this device */
	u16 fsb_mapping_errors;		/* DID for the branchmap,control */
};

/*
 * Table of devices attributes supported by this driver.
 * Only the i7300 MCH is handled, so there is a single entry.
 */
static const struct i7300_dev_info i7300_devs[] = {
	{
		.ctl_name = "I7300",
		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
	},
};
/* Per-DIMM information */
struct i7300_dimm_info {
	int megabytes;		/* size, 0 means not present */
};

/* driver private data structure */
struct i7300_pvt {
	struct pci_dev *pci_dev_16_0_fsb_ctlr;		/* 16.0 */
	struct pci_dev *pci_dev_16_1_fsb_addr_map;	/* 16.1 */
	struct pci_dev *pci_dev_16_2_fsb_err_regs;	/* 16.2 */
	struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES];	/* 21.0 and 22.0 */

	u16 tolm;				/* top of low memory */
	u64 ambase;				/* AMB BAR */

	u32 mc_settings;			/* Report several settings */
	u32 mc_settings_a;

	u16 mir[MAX_MIR];			/* Memory Interleave Reg */
	u16 mtr[MAX_SLOTS][MAX_BRANCHES];	/* Memory Technology Reg */
	u16 ambpresent[MAX_CHANNELS];		/* AMB present regs */

	/* DIMM information matrix, allocating architecture maximums */
	struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS];

	/* Temporary buffer for use when preparing error messages */
	char *tmp_prt_buffer;
};

/* FIXME: Why do we need to have this static? */
static struct edac_pci_ctl_info *i7300_pci;
  97. /***************************************************
  98. * i7300 Register definitions for memory enumeration
  99. ***************************************************/
/*
 * Device 16,
 * Function 0: System Address (not documented)
 * Function 1: Memory Branch Map, Control, Errors Register
 */

/* OFFSETS for Function 0 */
#define AMBASE			0x48	/* AMB Mem Mapped Reg Region Base */
#define MAXCH			0x56	/* Max Channel Number */
#define MAXDIMMPERCH		0x57	/* Max DIMM PER Channel Number */

/* OFFSETS for Function 1 */
#define MC_SETTINGS		0x40	/* memory controller settings */
  #define IS_MIRRORED(mc)		((mc) & (1 << 16))
  #define IS_ECC_ENABLED(mc)		((mc) & (1 << 5))
  #define IS_RETRY_ENABLED(mc)		((mc) & (1 << 31))
  #define IS_SCRBALGO_ENHANCED(mc)	((mc) & (1 << 8))

#define MC_SETTINGS_A		0x58
  #define IS_SINGLE_MODE(mca)		((mca) & (1 << 14))

#define TOLM			0x6C	/* top of low memory */

/* Memory Interleave Range registers */
#define MIR0			0x80
#define MIR1			0x84
#define MIR2			0x88
/*
 * Note: Other Intel EDAC drivers use AMBPRESENT to identify the available
 * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it
 * seems that we cannot use this information directly for the same usage.
 * Each memory slot may have up to 2 AMB interfaces, one for income and another
 * for outcome interface to the next slot.
 * For now, the driver just stores the AMB present registers, but relies only
 * on the MTR info to detect memory.
 * Datasheet is also not clear about how to map each AMBPRESENT register to
 * one of the 4 available channels.
 */
#define AMBPRESENT_0	0x64
#define AMBPRESENT_1	0x66

/*
 * MTR register offsets, indexed by slot number: MTR0..MTR3 are the
 * even offsets, MTR4..MTR7 the odd ones.
 */
static const u16 mtr_regs[MAX_SLOTS] = {
	0x80, 0x84, 0x88, 0x8c,
	0x82, 0x86, 0x8a, 0x8e
};

/*
 * Defines to extract the various fields from the
 * MTRx - Memory Technology Registers
 */
#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (1 << 8))
#define MTR_DIMMS_ETHROTTLE(mtr)	((mtr) & (1 << 7))
#define MTR_DRAM_WIDTH(mtr)		(((mtr) & (1 << 6)) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr)		(((mtr) & (1 << 5)) ? 8 : 4)
#define MTR_DIMM_RANKS(mtr)		(((mtr) & (1 << 4)) ? 1 : 0)
#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
#define MTR_DRAM_BANKS_ADDR_BITS	2
#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)
/************************************************
 * i7300 Register definitions for error detection
 ************************************************/

/*
 * Device 16.1: FBD Error Registers
 */

/* First FaTal error register for the FBD memory channels */
#define FERR_FAT_FBD	0x98

/* Names for the fatal error bits of FERR_FAT_FBD */
static const char *ferr_fat_fbd_name[] = {
	[22] = "Non-Redundant Fast Reset Timeout",
	[2]  = ">Tmid Thermal event with intelligent throttling disabled",
	[1]  = "Memory or FBD configuration CRC read error",
	[0]  = "Memory Write error on non-redundant retry or "
	       "FBD configuration Write error on retry",
};

/* Index of the failing FBD channel, from bits 29:28 of the error register
 * (NOTE(review): bit positions taken from the code — confirm vs datasheet) */
#define GET_FBD_FAT_IDX(fbderr)	(((fbderr) >> 28) & 3)

/* Fatal error bits this driver handles */
#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))

/* First Non-Fatal error register for the FBD memory channels */
#define FERR_NF_FBD	0xa0

/* Names for the non-fatal error bits of FERR_NF_FBD */
static const char *ferr_nf_fbd_name[] = {
	[24] = "DIMM-Spare Copy Completed",
	[23] = "DIMM-Spare Copy Initiated",
	[22] = "Redundant Fast Reset Timeout",
	[21] = "Memory Write error on redundant retry",
	[18] = "SPD protocol Error",
	[17] = "FBD Northbound parity error on FBD Sync Status",
	[16] = "Correctable Patrol Data ECC",
	[15] = "Correctable Resilver- or Spare-Copy Data ECC",
	[14] = "Correctable Mirrored Demand Data ECC",
	[13] = "Correctable Non-Mirrored Demand Data ECC",
	[11] = "Memory or FBD configuration CRC read error",
	[10] = "FBD Configuration Write error on first attempt",
	[9]  = "Memory Write error on first attempt",
	[8]  = "Non-Aliased Uncorrectable Patrol Data ECC",
	[7]  = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[6]  = "Non-Aliased Uncorrectable Mirrored Demand Data ECC",
	[5]  = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[4]  = "Aliased Uncorrectable Patrol Data ECC",
	[3]  = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[2]  = "Aliased Uncorrectable Mirrored Demand Data ECC",
	[1]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[0]  = "Uncorrectable Data ECC on Replay",
};

/* Index of the failing FBD channel for non-fatal errors (bits 29:28) */
#define GET_FBD_NF_IDX(fbderr)	(((fbderr) >> 28) & 3)

/* Non-fatal error bits this driver handles */
#define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
			      (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
			      (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
			      (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
			      (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
			      (1 << 1)  | (1 << 0))

/*
 * FBD error-mask register. A bit cleared to 0 enables reporting of the
 * corresponding error (see i7300_enable_error_reporting()).
 */
#define EMASK_FBD	0xa8

#define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\
			    (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\
			    (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\
			    (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\
			    (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
			    (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
			    (1 << 1)  | (1 << 0))
  208. /*
  209. * Device 16.2: Global Error Registers
  210. */
  211. #define FERR_GLOBAL_HI 0x48
  212. static const char *ferr_global_hi_name[] = {
  213. [3] = "FSB 3 Fatal Error",
  214. [2] = "FSB 2 Fatal Error",
  215. [1] = "FSB 1 Fatal Error",
  216. [0] = "FSB 0 Fatal Error",
  217. };
  218. #define ferr_global_hi_is_fatal(errno) 1
  219. #define FERR_GLOBAL_LO 0x40
  220. static const char *ferr_global_lo_name[] = {
  221. [31] = "Internal MCH Fatal Error",
  222. [30] = "Intel QuickData Technology Device Fatal Error",
  223. [29] = "FSB1 Fatal Error",
  224. [28] = "FSB0 Fatal Error",
  225. [27] = "FBD Channel 3 Fatal Error",
  226. [26] = "FBD Channel 2 Fatal Error",
  227. [25] = "FBD Channel 1 Fatal Error",
  228. [24] = "FBD Channel 0 Fatal Error",
  229. [23] = "PCI Express Device 7Fatal Error",
  230. [22] = "PCI Express Device 6 Fatal Error",
  231. [21] = "PCI Express Device 5 Fatal Error",
  232. [20] = "PCI Express Device 4 Fatal Error",
  233. [19] = "PCI Express Device 3 Fatal Error",
  234. [18] = "PCI Express Device 2 Fatal Error",
  235. [17] = "PCI Express Device 1 Fatal Error",
  236. [16] = "ESI Fatal Error",
  237. [15] = "Internal MCH Non-Fatal Error",
  238. [14] = "Intel QuickData Technology Device Non Fatal Error",
  239. [13] = "FSB1 Non-Fatal Error",
  240. [12] = "FSB 0 Non-Fatal Error",
  241. [11] = "FBD Channel 3 Non-Fatal Error",
  242. [10] = "FBD Channel 2 Non-Fatal Error",
  243. [9] = "FBD Channel 1 Non-Fatal Error",
  244. [8] = "FBD Channel 0 Non-Fatal Error",
  245. [7] = "PCI Express Device 7 Non-Fatal Error",
  246. [6] = "PCI Express Device 6 Non-Fatal Error",
  247. [5] = "PCI Express Device 5 Non-Fatal Error",
  248. [4] = "PCI Express Device 4 Non-Fatal Error",
  249. [3] = "PCI Express Device 3 Non-Fatal Error",
  250. [2] = "PCI Express Device 2 Non-Fatal Error",
  251. [1] = "PCI Express Device 1 Non-Fatal Error",
  252. [0] = "ESI Non-Fatal Error",
  253. };
  254. #define ferr_global_lo_is_fatal(errno) ((errno < 16) ? 0 : 1)
  255. #define NRECMEMA 0xbe
  256. #define NRECMEMA_BANK(v) (((v) >> 12) & 7)
  257. #define NRECMEMA_RANK(v) (((v) >> 8) & 15)
  258. #define NRECMEMB 0xc0
  259. #define NRECMEMB_IS_WR(v) ((v) & (1 << 31))
  260. #define NRECMEMB_CAS(v) (((v) >> 16) & 0x1fff)
  261. #define NRECMEMB_RAS(v) ((v) & 0xffff)
  262. #define REDMEMA 0xdc
  263. #define REDMEMB 0x7c
  264. #define IS_SECOND_CH(v) ((v) * (1 << 17))
  265. #define RECMEMA 0xe0
  266. #define RECMEMA_BANK(v) (((v) >> 12) & 7)
  267. #define RECMEMA_RANK(v) (((v) >> 8) & 15)
  268. #define RECMEMB 0xe4
  269. #define RECMEMB_IS_WR(v) ((v) & (1 << 31))
  270. #define RECMEMB_CAS(v) (((v) >> 16) & 0x1fff)
  271. #define RECMEMB_RAS(v) ((v) & 0xffff)
  272. /********************************************
  273. * i7300 Functions related to error detection
  274. ********************************************/
  275. /**
  276. * get_err_from_table() - Gets the error message from a table
  277. * @table: table name (array of char *)
  278. * @size: number of elements at the table
  279. * @pos: position of the element to be returned
  280. *
  281. * This is a small routine that gets the pos-th element of a table. If the
  282. * element doesn't exist (or it is empty), it returns "reserved".
  283. * Instead of calling it directly, the better is to call via the macro
  284. * GET_ERR_FROM_TABLE(), that automatically checks the table size via
  285. * ARRAY_SIZE() macro
  286. */
  287. static const char *get_err_from_table(const char *table[], int size, int pos)
  288. {
  289. if (unlikely(pos >= size))
  290. return "Reserved";
  291. if (unlikely(!table[pos]))
  292. return "Reserved";
  293. return table[pos];
  294. }
  295. #define GET_ERR_FROM_TABLE(table, pos) \
  296. get_err_from_table(table, ARRAY_SIZE(table), pos)
/**
 * i7300_process_error_global() - Retrieve the hardware error information from
 *				  the hardware global error registers and
 *				  sends it to dmesg
 * @mci: struct mem_ctl_info pointer
 *
 * Checks FERR_GLOBAL_HI first (fatal errors only), then FERR_GLOBAL_LO.
 * Only the first (lowest-numbered) error bit found is reported, and the
 * whole register is written back to clear it (the registers are RWC —
 * see i7300_clear_error()).
 */
static void i7300_process_error_global(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 errnum, error_reg;
	unsigned long errors;
	const char *specific;
	bool is_fatal;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_HI, &error_reg);
	if (unlikely(error_reg)) {
		errors = error_reg;
		/* Report only the lowest bit that is set */
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_global_hi_name));
		specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
		is_fatal = ferr_global_hi_is_fatal(errnum);

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
				       FERR_GLOBAL_HI, error_reg);

		goto error_global;
	}

	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_LO, &error_reg);
	if (unlikely(error_reg)) {
		errors = error_reg;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_global_lo_name));
		specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
		is_fatal = ferr_global_lo_is_fatal(errnum);

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
				       FERR_GLOBAL_LO, error_reg);

		goto error_global;
	}
	return;

error_global:
	i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n",
			is_fatal ? "Fatal" : "NOT fatal", specific);
}
/**
 * i7300_process_fbd_error() - Retrieve the hardware error information from
 *			       the FBD error registers and sends it via
 *			       EDAC error API calls
 * @mci: struct mem_ctl_info pointer
 *
 * Handles the fatal FBD errors first (FERR_FAT_FBD), then the non-fatal
 * ones (FERR_NF_FBD). In each case only the lowest set bit within the
 * driver-handled mask is reported, the error-log registers (NRECMEM*/
 * RECMEM*/REDMEM*) are decoded for bank/rank/RAS/CAS details, and the
 * error register is written back to clear it.
 */
static void i7300_process_fbd_error(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 errnum, value, error_reg;
	u16 val16;
	unsigned branch, channel, bank, rank, cas, ras;
	u32 syndrome;
	unsigned long errors;
	const char *specific;
	bool is_wr;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_FAT_FBD, &error_reg);
	if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) {
		errors = error_reg & FERR_FAT_FBD_ERR_MASK;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_fat_fbd_name));
		specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
		branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;

		/* Bank and rank of the failing DIMM, from the
		 * non-recoverable error log */
		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
				     NRECMEMA, &val16);
		bank = NRECMEMA_BANK(val16);
		rank = NRECMEMA_RANK(val16);

		/* Direction (read/write) and failing RAS/CAS address */
		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      NRECMEMB, &value);
		is_wr = NRECMEMB_IS_WR(value);
		cas = NRECMEMB_CAS(value);
		ras = NRECMEMB_RAS(value);

		/* Clean the error register */
		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				       FERR_FAT_FBD, error_reg);

		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
			 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
			 bank, ras, cas, errors, specific);

		edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
				     branch, -1, rank,
				     is_wr ? "Write error" : "Read error",
				     pvt->tmp_prt_buffer);
	}

	/* read in the 1st NON-FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_NF_FBD, &error_reg);
	if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) {
		errors = error_reg & FERR_NF_FBD_ERR_MASK;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_nf_fbd_name));
		specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
		branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;

		/* ECC syndrome of the failure */
		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      REDMEMA, &syndrome);

		/* Bank and rank, from the recoverable error log */
		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
				     RECMEMA, &val16);
		bank = RECMEMA_BANK(val16);
		rank = RECMEMA_RANK(val16);

		/* Direction (read/write) and failing RAS/CAS address */
		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      RECMEMB, &value);
		is_wr = RECMEMB_IS_WR(value);
		cas = RECMEMB_CAS(value);
		ras = RECMEMB_RAS(value);

		/* REDMEMB tells which channel of the branch was hit */
		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      REDMEMB, &value);
		channel = (branch << 1);
		if (IS_SECOND_CH(value))
			channel++;

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				       FERR_NF_FBD, error_reg);

		/* Form out message */
		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
			 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
			 bank, ras, cas, errors, specific);

		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0,
				     syndrome,
				     branch >> 1, channel % 2, rank,
				     is_wr ? "Write error" : "Read error",
				     pvt->tmp_prt_buffer);
	}
	return;
}
  429. /**
  430. * i7300_check_error() - Calls the error checking subroutines
  431. * @mci: struct mem_ctl_info pointer
  432. */
  433. static void i7300_check_error(struct mem_ctl_info *mci)
  434. {
  435. i7300_process_error_global(mci);
  436. i7300_process_fbd_error(mci);
  437. };
  438. /**
  439. * i7300_clear_error() - Clears the error registers
  440. * @mci: struct mem_ctl_info pointer
  441. */
  442. static void i7300_clear_error(struct mem_ctl_info *mci)
  443. {
  444. struct i7300_pvt *pvt = mci->pvt_info;
  445. u32 value;
  446. /*
  447. * All error values are RWC - we need to read and write 1 to the
  448. * bit that we want to cleanup
  449. */
  450. /* Clear global error registers */
  451. pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
  452. FERR_GLOBAL_HI, &value);
  453. pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
  454. FERR_GLOBAL_HI, value);
  455. pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
  456. FERR_GLOBAL_LO, &value);
  457. pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
  458. FERR_GLOBAL_LO, value);
  459. /* Clear FBD error registers */
  460. pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
  461. FERR_FAT_FBD, &value);
  462. pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
  463. FERR_FAT_FBD, value);
  464. pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
  465. FERR_NF_FBD, &value);
  466. pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
  467. FERR_NF_FBD, value);
  468. }
  469. /**
  470. * i7300_enable_error_reporting() - Enable the memory reporting logic at the
  471. * hardware
  472. * @mci: struct mem_ctl_info pointer
  473. */
  474. static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
  475. {
  476. struct i7300_pvt *pvt = mci->pvt_info;
  477. u32 fbd_error_mask;
  478. /* Read the FBD Error Mask Register */
  479. pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
  480. EMASK_FBD, &fbd_error_mask);
  481. /* Enable with a '0' */
  482. fbd_error_mask &= ~(EMASK_FBD_ERR_MASK);
  483. pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
  484. EMASK_FBD, fbd_error_mask);
  485. }
/************************************************
 * i7300 Functions related to memory enumeration
 ************************************************/
/**
 * decode_mtr() - Decodes the MTR descriptor, filling the edac structs
 * @pvt:	pointer to the private data struct used by i7300 driver
 * @slot:	DIMM slot (0 to 7)
 * @ch:		Channel number within the branch (0 or 1)
 * @branch:	Branch number (0 or 1)
 * @dinfo:	Pointer to DIMM info where dimm size is stored
 * @dimm:	Pointer to the struct dimm_info to be filled for this DIMM
 *
 * Returns the raw MTR register value when a DIMM is present on this
 * slot/branch, or 0 when the slot is empty.
 */
static int decode_mtr(struct i7300_pvt *pvt,
		      int slot, int ch, int branch,
		      struct i7300_dimm_info *dinfo,
		      struct dimm_info *dimm)
{
	int mtr, ans, addrBits, channel;

	channel = to_channel(ch, branch);

	mtr = pvt->mtr[slot][branch];
	ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;

	edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
		 slot, channel, ans ? "" : "NOT ");

	/* Determine if there is a DIMM present in this DIMM slot */
	if (!ans)
		return 0;

	/*
	 * Compute the DIMM size as 2^addrBits megabytes, by summing the
	 * address bits of each component and converting to MiB at the end.
	 */

	/* Start with the number of bits for a Bank
	 * on the DRAM */
	addrBits = MTR_DRAM_BANKS_ADDR_BITS;
	/* Add the number of ROW bits */
	addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
	/* add the number of COLUMN bits */
	addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
	/* add the number of RANK bits */
	addrBits += MTR_DIMM_RANKS(mtr);

	addrBits += 6;	/* add 64 bits per DIMM */
	addrBits -= 20;	/* divide by 2^20 to get MiB */
	addrBits -= 3;	/* 8 bits per bytes */

	dinfo->megabytes = 1 << addrBits;

	edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));

	edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
		 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");

	edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
	edac_dbg(2, "\t\tNUMRANK: %s\n",
		 MTR_DIMM_RANKS(mtr) ? "double" : "single");
	edac_dbg(2, "\t\tNUMROW: %s\n",
		 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
		 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
		 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
		 "65,536 - 16 rows");
	edac_dbg(2, "\t\tNUMCOL: %s\n",
		 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
		 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
		 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
		 "reserved");
	edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);

	/*
	 * The type of error detection actually depends of the
	 * mode of operation. When it is just one single memory chip, at
	 * socket 0, channel 0, it uses 8-byte-over-32-byte SECDED+ code.
	 * In normal or mirrored mode, it uses Lockstep mode,
	 * with the possibility of using an extended algorithm for x8 memories
	 * See datasheet Sections 7.3.6 to 7.3.8
	 */
	dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
	dimm->grain = 8;
	dimm->mtype = MEM_FB_DDR2;
	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
		dimm->edac_mode = EDAC_SECDED;
		edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
	} else {
		edac_dbg(2, "\t\tECC code is on Lockstep mode\n");
		if (MTR_DRAM_WIDTH(mtr) == 8)
			dimm->edac_mode = EDAC_S8ECD8ED;
		else
			dimm->edac_mode = EDAC_S4ECD4ED;
	}

	/* ask what device type on this row */
	if (MTR_DRAM_WIDTH(mtr) == 8) {
		edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n",
			 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
			 "enhanced" : "normal");
		dimm->dtype = DEV_X8;
	} else
		dimm->dtype = DEV_X4;

	return mtr;
}
/**
 * print_dimm_size() - Prints dump of the memory organization
 * @pvt: pointer to the private data struct used by i7300 driver
 *
 * Useful for debug. If debug is disabled, this routine do nothing.
 * Prints one table row per slot with the size (in MB) of the DIMM on
 * each of the four channels.
 *
 * NOTE(review): each line is built with repeated snprintf() calls into
 * tmp_prt_buffer; if a line ever exceeded PAGE_SIZE, "space" would go
 * negative and be passed to snprintf() as a huge size_t. Looks safe with
 * the current fixed-width fields, but worth confirming.
 */
static void print_dimm_size(struct i7300_pvt *pvt)
{
#ifdef CONFIG_EDAC_DEBUG
	struct i7300_dimm_info *dinfo;
	char *p;
	int space, n;
	int channel, slot;

	space = PAGE_SIZE;
	p = pvt->tmp_prt_buffer;

	/* Header row: one column label per channel */
	n = snprintf(p, space, " ");
	p += n;
	space -= n;
	for (channel = 0; channel < MAX_CHANNELS; channel++) {
		n = snprintf(p, space, "channel %d | ", channel);
		p += n;
		space -= n;
	}
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;

	/* Separator line */
	n = snprintf(p, space, "-------------------------------"
			       "------------------------------");
	p += n;
	space -= n;
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;

	/* One row per slot, with the DIMM size for every channel */
	for (slot = 0; slot < MAX_SLOTS; slot++) {
		n = snprintf(p, space, "csrow/SLOT %d ", slot);
		p += n;
		space -= n;

		for (channel = 0; channel < MAX_CHANNELS; channel++) {
			dinfo = &pvt->dimm_info[slot][channel];
			n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
			p += n;
			space -= n;
		}

		edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
		p = pvt->tmp_prt_buffer;
		space = PAGE_SIZE;
	}

	/* Closing separator line */
	n = snprintf(p, space, "-------------------------------"
			       "------------------------------");
	p += n;
	space -= n;
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;
#endif
}
/**
 * i7300_init_csrows() - Initialize the 'csrows' table within
 *			 the mci control structure with the
 *			 addressing of memory.
 * @mci: struct mem_ctl_info pointer
 *
 * Reads the AMB-present and MTR registers from both branch devices and
 * decodes each MTR into the corresponding EDAC dimm_info entry.
 *
 * Returns 0 when at least one DIMM was found, -ENODEV otherwise.
 */
static int i7300_init_csrows(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	struct i7300_dimm_info *dinfo;
	int rc = -ENODEV;
	int mtr;
	int ch, branch, slot, channel, max_channel, max_branch;
	struct dimm_info *dimm;

	pvt = mci->pvt_info;

	edac_dbg(2, "Memory Technology Registers:\n");

	/* Single-channel mode uses only branch 0 / channel 0 */
	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
		max_branch = 1;
		max_channel = 1;
	} else {
		max_branch = MAX_BRANCHES;
		max_channel = MAX_CH_PER_BRANCH;
	}

	/* Get the AMB present registers for the four channels */
	for (branch = 0; branch < max_branch; branch++) {
		/* Read and dump branch 0's MTRs */
		channel = to_channel(0, branch);
		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
				     AMBPRESENT_0,
				     &pvt->ambpresent[channel]);
		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
			 channel, pvt->ambpresent[channel]);

		if (max_channel == 1)
			continue;

		channel = to_channel(1, branch);
		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
				     AMBPRESENT_1,
				     &pvt->ambpresent[channel]);
		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
			 channel, pvt->ambpresent[channel]);
	}

	/* Get the set of MTR[0-7] regs by each branch */
	for (slot = 0; slot < MAX_SLOTS; slot++) {
		int where = mtr_regs[slot];

		for (branch = 0; branch < max_branch; branch++) {
			pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
					     where,
					     &pvt->mtr[slot][branch]);
			for (ch = 0; ch < max_channel; ch++) {
				int channel = to_channel(ch, branch);

				dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
						     mci->n_layers, branch, ch, slot);

				dinfo = &pvt->dimm_info[slot][channel];

				mtr = decode_mtr(pvt, slot, ch, branch,
						 dinfo, dimm);

				/* if no DIMMS on this row, continue */
				if (!MTR_DIMMS_PRESENT(mtr))
					continue;

				/* At least one DIMM found: report success */
				rc = 0;
			}
		}
	}

	return rc;
}
  693. /**
  694. * decode_mir() - Decodes Memory Interleave Register (MIR) info
  695. * @int mir_no: number of the MIR register to decode
  696. * @mir: array with the MIR data cached on the driver
  697. */
  698. static void decode_mir(int mir_no, u16 mir[MAX_MIR])
  699. {
  700. if (mir[mir_no] & 3)
  701. edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
  702. mir_no,
  703. (mir[mir_no] >> 4) & 0xfff,
  704. (mir[mir_no] & 1) ? "B0" : "",
  705. (mir[mir_no] & 2) ? "B1" : "");
  706. }
/**
 * i7300_get_mc_regs() - Get the contents of the MC enumeration registers
 * @mci: struct mem_ctl_info pointer
 *
 * Data read is cached internally for its usage when needed
 *
 * Returns 0 on success, or the negative error from i7300_init_csrows().
 */
static int i7300_get_mc_regs(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 actual_tolm;
	int i, rc;

	pvt = mci->pvt_info;

	pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
			      (u32 *) &pvt->ambase);

	edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);

	/* Get the Branch Map regs */
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
	/* After the shift, tolm counts 256 MB regions (see debug message) */
	pvt->tolm >>= 12;
	edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
		 pvt->tolm, pvt->tolm);

	/* Scale to a decimal x.yyy GB figure purely for the debug print */
	actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
	edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
		 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);

	/* Get memory controller settings */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
			      &pvt->mc_settings);
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A,
			      &pvt->mc_settings_a);

	if (IS_SINGLE_MODE(pvt->mc_settings_a))
		edac_dbg(0, "Memory controller operating on single mode\n");
	else
		edac_dbg(0, "Memory controller operating on %smirrored mode\n",
			 IS_MIRRORED(pvt->mc_settings) ? "" : "non-");

	edac_dbg(0, "Error detection is %s\n",
		 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
	edac_dbg(0, "Retry is %s\n",
		 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");

	/* Get Memory Interleave Range registers */
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
			     &pvt->mir[0]);
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1,
			     &pvt->mir[1]);
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2,
			     &pvt->mir[2]);

	/* Decode the MIR regs */
	for (i = 0; i < MAX_MIR; i++)
		decode_mir(i, pvt->mir);

	rc = i7300_init_csrows(mci);
	if (rc < 0)
		return rc;

	/* Go and determine the size of each DIMM and place in an
	 * orderly matrix */
	print_dimm_size(pvt);

	return 0;
}
  762. /*************************************************
  763. * i7300 Functions related to device probe/release
  764. *************************************************/
  765. /**
  766. * i7300_put_devices() - Release the PCI devices
  767. * @mci: struct mem_ctl_info pointer
  768. */
  769. static void i7300_put_devices(struct mem_ctl_info *mci)
  770. {
  771. struct i7300_pvt *pvt;
  772. int branch;
  773. pvt = mci->pvt_info;
  774. /* Decrement usage count for devices */
  775. for (branch = 0; branch < MAX_CH_PER_BRANCH; branch++)
  776. pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]);
  777. pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs);
  778. pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map);
  779. }
/**
 * i7300_get_devices() - Find and perform 'get' operation on the MCH's
 *			 device/functions we want to reference for this driver
 * @mci: struct mem_ctl_info pointer
 *
 * Access and prepare the several devices for usage:
 *
 * I7300 devices used by this driver:
 *    Device 16, functions 0,1 and 2: PCI_DEVICE_ID_INTEL_I7300_MCH_ERR
 *    Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
 *    Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
 *
 * Returns 0 on success or -ENODEV, with all acquired references released
 * on the failure path.
 */
static int i7300_get_devices(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	struct pci_dev *pdev;

	pvt = mci->pvt_info;

	/* Attempt to 'get' the MCH register we want */
	pdev = NULL;
	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
				      pdev))) {
		/* Store device 16 funcs 1 and 2.
		 * pci_dev_get() takes an extra reference for the stored
		 * pointer, because pci_get_device() drops the reference on
		 * the 'from' device at the next loop iteration. */
		switch (PCI_FUNC(pdev->devfn)) {
		case 1:
			if (!pvt->pci_dev_16_1_fsb_addr_map)
				pvt->pci_dev_16_1_fsb_addr_map =
							pci_dev_get(pdev);
			break;
		case 2:
			if (!pvt->pci_dev_16_2_fsb_err_regs)
				pvt->pci_dev_16_2_fsb_err_regs =
							pci_dev_get(pdev);
			break;
		}
	}

	/* Both func 1 and func 2 of device 16 are required */
	if (!pvt->pci_dev_16_1_fsb_addr_map ||
	    !pvt->pci_dev_16_2_fsb_err_regs) {
		/* At least one device was not found */
		i7300_printk(KERN_ERR,
			"'system address,Process Bus' device not found:"
			"vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
		goto error;
	}

	/* pci_dev_16_0_fsb_ctlr was recorded by the probe before this call */
	edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
		 pci_name(pvt->pci_dev_16_0_fsb_ctlr),
		 pvt->pci_dev_16_0_fsb_ctlr->vendor,
		 pvt->pci_dev_16_0_fsb_ctlr->device);
	edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
		 pci_name(pvt->pci_dev_16_1_fsb_addr_map),
		 pvt->pci_dev_16_1_fsb_addr_map->vendor,
		 pvt->pci_dev_16_1_fsb_addr_map->device);
	edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
		 pci_name(pvt->pci_dev_16_2_fsb_err_regs),
		 pvt->pci_dev_16_2_fsb_err_regs->vendor,
		 pvt->pci_dev_16_2_fsb_err_regs->device);

	/* Grab the two FBD branch controllers (devices 21 and 22) */
	pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
					    PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
					    NULL);
	if (!pvt->pci_dev_2x_0_fbd_branch[0]) {
		i7300_printk(KERN_ERR,
			"MC: 'BRANCH 0' device not found:"
			"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_INTEL_I7300_MCH_FB0);
		goto error;
	}

	pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL,
					    PCI_DEVICE_ID_INTEL_I7300_MCH_FB1,
					    NULL);
	if (!pvt->pci_dev_2x_0_fbd_branch[1]) {
		i7300_printk(KERN_ERR,
			"MC: 'BRANCH 1' device not found:"
			"vendor 0x%x device 0x%x Func 0 "
			"(broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_INTEL_I7300_MCH_FB1);
		goto error;
	}

	return 0;

error:
	/* Release whatever was acquired before the failure */
	i7300_put_devices(mci);
	return -ENODEV;
}
/**
 * i7300_init_one() - Probe for one instance of the device
 * @pdev: struct pci_dev pointer
 * @id: struct pci_device_id pointer - currently unused
 *
 * Allocates the mem_ctl_info with a branch/channel/slot layer layout,
 * acquires the companion PCI devices, caches the MC registers and
 * registers the controller with the EDAC core.
 */
static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[3];
	struct i7300_pvt *pvt;
	int rc;

	/* wake up device */
	/* NOTE(review): only -EIO is treated as fatal here; other
	 * pci_enable_device() errors fall through - confirm intent */
	rc = pci_enable_device(pdev);
	if (rc == -EIO)
		return rc;

	edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
		 pdev->bus->number,
		 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

	/* We only are looking for func 0 of the set */
	if (PCI_FUNC(pdev->devfn) != 0)
		return -ENODEV;

	/* allocate a new MC control structure:
	 * three layers (branch, channel, slot) matching the indices used
	 * by i7300_init_csrows() */
	layers[0].type = EDAC_MC_LAYER_BRANCH;
	layers[0].size = MAX_BRANCHES;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = MAX_CH_PER_BRANCH;
	layers[1].is_virt_csrow = true;
	layers[2].type = EDAC_MC_LAYER_SLOT;
	layers[2].size = MAX_SLOTS;
	layers[2].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (mci == NULL)
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p\n", mci);

	mci->pdev = &pdev->dev;	/* record ptr to the generic device */

	pvt = mci->pvt_info;
	pvt->pci_dev_16_0_fsb_ctlr = pdev;	/* Record this device in our private */

	/* scratch buffer used by the debug print helpers */
	pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pvt->tmp_prt_buffer) {
		edac_mc_free(mci);
		return -ENOMEM;
	}

	/* 'get' the pci devices we want to reserve for our use */
	if (i7300_get_devices(mci))
		goto fail0;

	mci->mc_idx = 0;
	mci->mtype_cap = MEM_FLAG_FB_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7300_edac.c";
	mci->mod_ver = I7300_REVISION;
	mci->ctl_name = i7300_devs[0].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	/* Set the function pointer to an actual operation function */
	mci->edac_check = i7300_check_error;

	/* initialize the MC control structure 'csrows' table
	 * with the mapping and control information */
	if (i7300_get_mc_regs(mci)) {
		edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
	} else {
		edac_dbg(1, "MC: Enable error reporting now\n");
		i7300_enable_error_reporting(mci);
	}

	/* add this new MC control structure to EDAC's list of MCs */
	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */
		goto fail1;
	}

	i7300_clear_error(mci);

	/* allocating generic PCI control info */
	i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!i7300_pci) {
		/* non-fatal: MC reporting still works without the PCI ctl */
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n",
			__func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	return 0;

	/* Error exit unwinding stack */
fail1:
	i7300_put_devices(mci);

fail0:
	kfree(pvt->tmp_prt_buffer);
	edac_mc_free(mci);
	return -ENODEV;
}
  958. /**
  959. * i7300_remove_one() - Remove the driver
  960. * @pdev: struct pci_dev pointer
  961. */
  962. static void i7300_remove_one(struct pci_dev *pdev)
  963. {
  964. struct mem_ctl_info *mci;
  965. char *tmp;
  966. edac_dbg(0, "\n");
  967. if (i7300_pci)
  968. edac_pci_release_generic_ctl(i7300_pci);
  969. mci = edac_mc_del_mc(&pdev->dev);
  970. if (!mci)
  971. return;
  972. tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer;
  973. /* retrieve references to resources, and free those resources */
  974. i7300_put_devices(mci);
  975. kfree(tmp);
  976. edac_mc_free(mci);
  977. }
/*
 * pci_device_id: table for which devices we are looking for
 *
 * Has only 8086:360c PCI ID
 */
static const struct pci_device_id i7300_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
	{0,}			/* 0 terminated list. */
};

/* Export the ID table so module tooling can match devices to this driver */
MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);
/*
 * i7300_driver: pci_driver structure for this module
 */
static struct pci_driver i7300_driver = {
	.name = "i7300_edac",
	.probe = i7300_init_one,	/* per-device setup */
	.remove = i7300_remove_one,	/* per-device teardown */
	.id_table = i7300_pci_tbl,
};
  997. /**
  998. * i7300_init() - Registers the driver
  999. */
  1000. static int __init i7300_init(void)
  1001. {
  1002. int pci_rc;
  1003. edac_dbg(2, "\n");
  1004. /* Ensure that the OPSTATE is set correctly for POLL or NMI */
  1005. opstate_init();
  1006. pci_rc = pci_register_driver(&i7300_driver);
  1007. return (pci_rc < 0) ? pci_rc : 0;
  1008. }
  1009. /**
  1010. * i7300_init() - Unregisters the driver
  1011. */
  1012. static void __exit i7300_exit(void)
  1013. {
  1014. edac_dbg(2, "\n");
  1015. pci_unregister_driver(&i7300_driver);
  1016. }
/* Hook the module entry/exit points */
module_init(i7300_init);
module_exit(i7300_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
		   I7300_REVISION);

/* Read-only module parameter selecting the EDAC error-reporting mode */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");