amd64_edac_inj.c

#include "amd64_edac.h"

static ssize_t amd64_inject_section_show(struct device *dev,
					 struct device_attribute *mattr,
					 char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	return sprintf(buf, "0x%x\n", pvt->injection.section);
}

/*
 * store error injection section value which refers to one of 4 16-byte sections
 * within a 64-byte cacheline
 *
 * range: 0..3
 */
static ssize_t amd64_inject_section_store(struct device *dev,
					  struct device_attribute *mattr,
					  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	if (value > 3) {
		amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
		return -EINVAL;
	}

	pvt->injection.section = (u32) value;
	return count;
}
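
/*
 * Illustrative example (not part of the original driver): assuming the
 * controller is exposed as mc0 under sysfs, selecting the third 16-byte
 * chunk of the target cacheline would be:
 *
 *	echo 2 > /sys/devices/system/edac/mc/mc0/inject_section
 */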

static ssize_t amd64_inject_word_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	return sprintf(buf, "0x%x\n", pvt->injection.word);
}

/*
 * store error injection word value which refers to one of 9 16-bit words of
 * the 16-byte (128-bit + ECC bits) section
 *
 * range: 0..8
 */
static ssize_t amd64_inject_word_store(struct device *dev,
				       struct device_attribute *mattr,
				       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	if (value > 8) {
		amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
		return -EINVAL;
	}

	pvt->injection.word = (u32) value;
	return count;
}
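
/*
 * Illustrative example (not part of the original driver): words 0..7 cover
 * the 128 data bits of the selected section, and word 8 presumably addresses
 * the ECC check bits. Assuming mc0:
 *
 *	echo 4 > /sys/devices/system/edac/mc/mc0/inject_word
 */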

static ssize_t amd64_inject_ecc_vector_show(struct device *dev,
					    struct device_attribute *mattr,
					    char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
}

/*
 * store 16 bit error injection vector which enables injecting errors to the
 * corresponding bit within the error injection word above. When used during a
 * DRAM ECC read, it holds the contents of the DRAM ECC bits.
 */
static ssize_t amd64_inject_ecc_vector_store(struct device *dev,
					     struct device_attribute *mattr,
					     const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 16, &value);
	if (ret < 0)
		return ret;

	if (value & 0xFFFF0000) {
		amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
		return -EINVAL;
	}

	pvt->injection.bit_map = (u32) value;
	return count;
}
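
/*
 * Illustrative example (not part of the original driver): each set bit in
 * the vector enables error injection on the corresponding bit of the
 * injection word selected above. The value is parsed as hex; assuming mc0:
 *
 *	echo 0x1 > /sys/devices/system/edac/mc/mc0/inject_ecc_vector
 */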

/*
 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
 * fields needed by the injection registers and read the NB Array Data Port.
 */
static ssize_t amd64_inject_read_store(struct device *dev,
				       struct device_attribute *mattr,
				       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	u32 section, word_bits;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	/* Form value to choose 16-byte section of cacheline */
	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);

	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);

	word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);

	/* Issue 'word' and 'bit' along with the READ request */
	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);

	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);

	return count;
}
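
/*
 * Illustrative example (not part of the original driver): any value written
 * here merely triggers the staged DRAM ECC read; the number itself is parsed
 * but otherwise unused. Assuming mc0:
 *
 *	echo 1 > /sys/devices/system/edac/mc/mc0/inject_read
 */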

/*
 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
 * fields needed by the injection registers.
 */
static ssize_t amd64_inject_write_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 section, word_bits, tmp;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	/* Form value to choose 16-byte section of cacheline */
	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);

	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);

	word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);

	pr_notice_once("Don't forget to decrease MCE polling interval in\n"
		       "/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
		       "so that you can get the error report faster.\n");

	on_each_cpu(disable_caches, NULL, 1);

	/* Issue 'word' and 'bit' along with the WRITE request */
	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);

 retry:
	/* wait until injection happens */
	amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
	if (tmp & F10_NB_ARR_ECC_WR_REQ) {
		cpu_relax();
		goto retry;
	}

	on_each_cpu(enable_caches, NULL, 1);

	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);

	return count;
}
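
/*
 * Illustrative end-to-end sequence (not part of the original driver),
 * assuming mc0 and CPU0's machinecheck device, check_interval in seconds:
 *
 *	echo 0   > /sys/devices/system/edac/mc/mc0/inject_section
 *	echo 0   > /sys/devices/system/edac/mc/mc0/inject_word
 *	echo 0x2 > /sys/devices/system/edac/mc/mc0/inject_ecc_vector
 *	echo 10  > /sys/bus/machinecheck/devices/machinecheck0/check_interval
 *	echo 1   > /sys/devices/system/edac/mc/mc0/inject_write
 *
 * The injected error should then show up as a corrected ECC error via the
 * MCE poller within check_interval.
 */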

/*
 * update NUM_INJ_ATTRS in case you add new members
 */
static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
		   amd64_inject_section_show, amd64_inject_section_store);
static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
		   amd64_inject_word_show, amd64_inject_word_store);
static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
		   amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store);
static DEVICE_ATTR(inject_write, S_IWUSR,
		   NULL, amd64_inject_write_store);
static DEVICE_ATTR(inject_read, S_IWUSR,
		   NULL, amd64_inject_read_store);

static struct attribute *amd64_edac_inj_attrs[] = {
	&dev_attr_inject_section.attr,
	&dev_attr_inject_word.attr,
	&dev_attr_inject_ecc_vector.attr,
	&dev_attr_inject_write.attr,
	&dev_attr_inject_read.attr,
	NULL
};

static umode_t amd64_edac_inj_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
	struct amd64_pvt *pvt = mci->pvt_info;

	if (pvt->fam < 0x10)
		return 0;

	return attr->mode;
}

const struct attribute_group amd64_edac_inj_group = {
	.attrs = amd64_edac_inj_attrs,
	.is_visible = amd64_edac_inj_is_visible,
};
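
/*
 * Note (added for illustration): amd64_edac_inj_group is meant to be attached
 * to the mci sysfs device by the main amd64_edac driver; the is_visible()
 * callback above hides the injection files on pre-family-0x10 CPUs, where
 * this NB Array injection interface is not available.
 */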