0008-haswell-NRI-Add-REUT-I-O-test-library.patch 35 KB

  1. From 49a7ef2401922a8492ba577a43235bcfba7ea822 Mon Sep 17 00:00:00 2001
  2. From: Angel Pons <th3fanbus@gmail.com>
  3. Date: Sun, 8 May 2022 00:11:29 +0200
  4. Subject: [PATCH 08/20] haswell NRI: Add REUT I/O test library
  5. Implement a library to run I/O tests using the REUT hardware.
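For illustration, a caller is expected to drive this library roughly as
follows (sketch only; the rank mask and loop count below are arbitrary
example values, the real callers come with later training patches):

    uint8_t chanmask = 0;
    for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++)
        chanmask |= select_reut_ranks(ctrl, channel, BIT(0));

    setup_io_test_basic_va(ctrl, chanmask, 10, NSOE);
    const uint8_t err_mask = run_io_test(ctrl, chanmask, ctrl->dq_pat, true);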
  6. Change-Id: Id7b207cd0a3989ddd23c88c6b1f0cfa79d2c861f
  7. Signed-off-by: Angel Pons <th3fanbus@gmail.com>
  8. ---
  9. .../intel/haswell/native_raminit/Makefile.mk | 1 +
  10. .../haswell/native_raminit/raminit_native.h | 110 +++
  11. .../haswell/native_raminit/reg_structs.h | 121 +++
  12. .../intel/haswell/native_raminit/testing_io.c | 744 ++++++++++++++++++
  13. .../intel/haswell/registers/mchbar.h | 30 +
  14. 5 files changed, 1006 insertions(+)
  15. create mode 100644 src/northbridge/intel/haswell/native_raminit/testing_io.c
  16. diff --git a/src/northbridge/intel/haswell/native_raminit/Makefile.mk b/src/northbridge/intel/haswell/native_raminit/Makefile.mk
  17. index 8d7d4e4db0..6e1b365602 100644
  18. --- a/src/northbridge/intel/haswell/native_raminit/Makefile.mk
  19. +++ b/src/northbridge/intel/haswell/native_raminit/Makefile.mk
  20. @@ -12,4 +12,5 @@ romstage-y += raminit_native.c
  21. romstage-y += reut.c
  22. romstage-y += setup_wdb.c
  23. romstage-y += spd_bitmunching.c
  24. +romstage-y += testing_io.c
  25. romstage-y += timings_refresh.c
  26. diff --git a/src/northbridge/intel/haswell/native_raminit/raminit_native.h b/src/northbridge/intel/haswell/native_raminit/raminit_native.h
  27. index 1971b44b66..7f19fde4cc 100644
  28. --- a/src/northbridge/intel/haswell/native_raminit/raminit_native.h
  29. +++ b/src/northbridge/intel/haswell/native_raminit/raminit_native.h
  30. @@ -58,6 +58,88 @@ enum {
  31. REUT_MODE_NOP = 3, /* Normal operation mode */
  32. };
  33. +/* REUT error counter control */
  34. +enum {
  35. + COUNT_ERRORS_PER_CHANNEL = 0,
  36. + COUNT_ERRORS_PER_LANE = 1,
  37. + COUNT_ERRORS_PER_BYTE_GROUP = 2,
  38. + COUNT_ERRORS_PER_CHUNK = 3,
  39. +};
  40. +
  41. +enum wdb_dq_pattern {
  42. + BASIC_VA = 0,
  43. + SEGMENT_WDB,
  44. + CADB,
  45. + TURN_AROUND,
  46. + LMN_VA,
  47. + TURN_AROUND_WR,
  48. + TURN_AROUND_ODT,
  49. + RD_RD_TA,
  50. + RD_RD_TA_ALL,
  51. +};
  52. +
  53. +enum reut_cmd_pat {
  54. + PAT_WR_RD,
  55. + PAT_WR,
  56. + PAT_RD,
  57. + PAT_RD_WR_TA,
  58. + PAT_WR_RD_TA,
  59. + PAT_ODT_TA,
  60. +};
  61. +
  62. +/* REUT subsequence types (B = Base, O = Offset) */
  63. +enum {
  64. + SUBSEQ_B_RD = 0 << 22,
  65. + SUBSEQ_B_WR = 1 << 22,
  66. + SUBSEQ_B_RD_WR = 2 << 22,
  67. + SUBSEQ_B_WR_RD = 3 << 22,
  68. + SUBSEQ_O_RD = 4 << 22,
  69. + SUBSEQ_O_WR = 5 << 22,
  70. +};
  71. +
  72. +/* REUT mux control */
  73. +enum {
  74. + REUT_MUX_LMN = 0,
  75. + REUT_MUX_BTBUFFER = 1,
  76. + REUT_MUX_LFSR = 2,
  77. +};
  78. +
  79. +/* Increment scale */
  80. +enum {
  81. + SCALE_LOGARITHM = 0,
  82. + SCALE_LINEAR = 1,
  83. +};
  84. +
  85. +enum test_stop {
  86. + NSOE = 0, /* Never stop on error */
  87. + NTHSOE = 1, /* Stop on the nth error (we use n = 1) */
  88. + ABGSOE = 2, /* Stop on all byte groups error */
  89. + ALSOE = 3, /* Stop on all lanes error */
  90. +};
  91. +
  92. +struct wdb_pat {
  93. + uint32_t start_ptr; /* Starting pointer in WDB */
  94. + uint32_t stop_ptr; /* Stopping pointer in WDB */
  95. + uint16_t inc_rate; /* How quickly the WDB walks through cachelines */
  96. + uint8_t dq_pattern; /* DQ pattern to use (see enum wdb_dq_pattern above) */
  97. +};
  98. +
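+/* Start/stop/increment parameters for one REUT sequence address field (rank/bank/row/col) */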
  99. +struct reut_pole {
  100. + uint16_t start;
  101. + uint16_t stop;
  102. + uint16_t order;
  103. + uint32_t inc_rate;
  104. + uint16_t inc_val;
  105. + bool wrap_trigger;
  106. +};
  107. +
  108. +struct reut_box {
  109. + struct reut_pole rank;
  110. + struct reut_pole bank;
  111. + struct reut_pole row;
  112. + struct reut_pole col;
  113. +};
  114. +
  115. enum command_training_iteration {
  116. CT_ITERATION_CLOCK = 0,
  117. CT_ITERATION_CMD_NORTH,
  118. @@ -199,6 +281,10 @@ struct sysinfo {
  119. uint16_t mr1[NUM_CHANNELS][NUM_SLOTS];
  120. uint16_t mr2[NUM_CHANNELS][NUM_SLOTS];
  121. uint16_t mr3[NUM_CHANNELS][NUM_SLOTS];
  122. +
  123. + uint8_t dq_pat;
  124. +
  125. + uint8_t dq_pat_lc;
  126. };
  127. static inline bool is_hsw_ult(void)
  128. @@ -340,6 +426,30 @@ void write_wdb_va_pat(
  129. void program_wdb_lfsr(const struct sysinfo *ctrl, bool cleanup);
  130. void setup_wdb(const struct sysinfo *ctrl);
  131. +void program_seq_addr(uint8_t channel, const struct reut_box *reut_addr, bool log_seq_addr);
  132. +void program_loop_count(const struct sysinfo *ctrl, uint8_t channel, uint8_t lc_exp);
  133. +
  134. +void setup_io_test(
  135. + struct sysinfo *ctrl,
  136. + uint8_t chanmask,
  137. + enum reut_cmd_pat cmd_pat,
  138. + uint16_t num_cl,
  139. + uint8_t lc,
  140. + const struct reut_box *reut_addr,
  141. + enum test_stop soe,
  142. + const struct wdb_pat *pat,
  143. + uint8_t en_cadb,
  144. + uint8_t subseq_wait);
  145. +
  146. +void setup_io_test_cadb(struct sysinfo *ctrl, uint8_t chanmask, uint8_t lc, enum test_stop soe);
  147. +void setup_io_test_basic_va(struct sysinfo *ctrl, uint8_t chm, uint8_t lc, enum test_stop soe);
  148. +void setup_io_test_mpr(struct sysinfo *ctrl, uint8_t chanmask, uint8_t lc, enum test_stop soe);
  149. +
  150. +uint8_t select_reut_ranks(struct sysinfo *ctrl, uint8_t channel, uint8_t rankmask);
  151. +
  152. +void run_mpr_io_test(bool clear_errors);
  153. +uint8_t run_io_test(struct sysinfo *ctrl, uint8_t chanmask, uint8_t dq_pat, bool clear_errors);
  154. +
  155. uint8_t get_rx_bias(const struct sysinfo *ctrl);
  156. uint8_t get_tCWL(uint32_t mem_clock_mhz);
  157. diff --git a/src/northbridge/intel/haswell/native_raminit/reg_structs.h b/src/northbridge/intel/haswell/native_raminit/reg_structs.h
  158. index 7aa8d8c8b2..b943259b91 100644
  159. --- a/src/northbridge/intel/haswell/native_raminit/reg_structs.h
  160. +++ b/src/northbridge/intel/haswell/native_raminit/reg_structs.h
  161. @@ -347,6 +347,54 @@ union reut_pat_cl_mux_lmn_reg {
  162. uint32_t raw;
  163. };
  164. +union reut_err_ctl_reg {
  165. + struct __packed {
  166. + uint32_t stop_on_nth_error : 6; // Bits 5:0
  167. + uint32_t : 6; // Bits 11:6
  168. + uint32_t stop_on_error_control : 2; // Bits 13:12
  169. + uint32_t : 2; // Bits 15:14
  170. + uint32_t selective_err_enable_chunk : 8; // Bits 23:16
  171. + uint32_t selective_err_enable_cacheline : 8; // Bits 31:24
  172. + };
  173. + uint32_t raw;
  174. +};
  175. +
  176. +union reut_pat_cadb_mux_ctrl_reg {
  177. + struct __packed {
  178. + uint32_t mux_0_ctrl : 2; // Bits 1:0
  179. + uint32_t : 2; // Bits 3:2
  180. + uint32_t mux_1_ctrl : 2; // Bits 5:4
  181. + uint32_t : 2; // Bits 7:6
  182. + uint32_t mux_2_ctrl : 2; // Bits 9:8
  183. + uint32_t : 6; // Bits 15:10
  184. + uint32_t sel_mux_0_ctrl : 2; // Bits 17:16
  185. + uint32_t : 2; // Bits 19:18
  186. + uint32_t sel_mux_1_ctrl : 2; // Bits 21:20
  187. + uint32_t : 2; // Bits 23:22
  188. + uint32_t sel_mux_2_ctrl : 2; // Bits 25:24
  189. + uint32_t : 6; // Bits 31:26
  190. + };
  191. + uint32_t raw;
  192. +};
  193. +
  194. +union reut_pat_wdb_cl_mux_cfg_reg {
  195. + struct __packed {
  196. + uint32_t mux_0_control : 2; // Bits 1:0
  197. + uint32_t : 1; // Bits 2:2
  198. + uint32_t mux_1_control : 2; // Bits 4:3
  199. + uint32_t : 1; // Bits 5:5
  200. + uint32_t mux_2_control : 2; // Bits 7:6
  201. + uint32_t : 6; // Bits 13:8
  202. + uint32_t ecc_replace_byte_ctl : 1; // Bits 14:14
  203. + uint32_t ecc_data_source_sel : 1; // Bits 15:15
  204. + uint32_t save_lfsr_seed_rate : 6; // Bits 21:16
  205. + uint32_t : 2; // Bits 23:22
  206. + uint32_t reload_lfsr_seed_rate : 3; // Bits 26:24
  207. + uint32_t : 5; // Bits 31:27
  208. + };
  209. + uint32_t raw;
  210. +};
  211. +
  212. union reut_pat_cadb_prog_reg {
  213. struct __packed {
  214. uint32_t addr : 16; // Bits 15:0
  215. @@ -366,6 +414,19 @@ union reut_pat_cadb_prog_reg {
  216. uint32_t raw32[2];
  217. };
  218. +union reut_pat_wdb_cl_ctrl_reg {
  219. + struct __packed {
  220. + uint32_t inc_rate : 5; // Bits 4:0
  221. + uint32_t inc_scale : 1; // Bits 5:5
  222. + uint32_t : 2; // Bits 7:6
  223. + uint32_t start_ptr : 6; // Bits 13:8
  224. + uint32_t : 2; // Bits 15:14
  225. + uint32_t end_ptr : 6; // Bits 21:16
  226. + uint32_t : 10; // Bits 31:22
  227. + };
  228. + uint32_t raw;
  229. +};
  230. +
  231. union reut_pat_cadb_mrs_reg {
  232. struct __packed {
  233. uint32_t delay_gap : 3; // Bits 2:0
  234. @@ -406,6 +467,66 @@ union reut_seq_cfg_reg {
  235. uint32_t raw32[2];
  236. };
  237. +union reut_seq_base_addr_reg {
  238. + struct __packed {
  239. + uint32_t : 3; // Bits 2:0
  240. + uint32_t col_addr : 8; // Bits 10:3
  241. + uint32_t : 13; // Bits 23:11
  242. + uint32_t row_addr : 16; // Bits 39:24
  243. + uint32_t : 8; // Bits 47:40
  244. + uint32_t bank_addr : 3; // Bits 50:48
  245. + uint32_t : 5; // Bits 55:51
  246. + uint32_t rank_addr : 3; // Bits 58:56
  247. + uint32_t : 5; // Bits 63:59
  248. + };
  249. + uint32_t raw32[2];
  250. + uint64_t raw;
  251. +};
  252. +
  253. +union reut_seq_misc_ctl_reg {
  254. + struct __packed {
  255. + uint32_t col_addr_order : 2; // Bits 1:0
  256. + uint32_t row_addr_order : 2; // Bits 3:2
  257. + uint32_t bank_addr_order : 2; // Bits 5:4
  258. + uint32_t rank_addr_order : 2; // Bits 7:6
  259. + uint32_t : 5; // Bits 12:8
  260. + uint32_t addr_invert_rate : 3; // Bits 15:13
  261. + uint32_t : 4; // Bits 19:16
  262. + uint32_t col_addr_invert_en : 1; // Bits 20:20
  263. + uint32_t row_addr_invert_en : 1; // Bits 21:21
  264. + uint32_t bank_addr_invert_en : 1; // Bits 22:22
  265. + uint32_t rank_addr_invert_en : 1; // Bits 23:23
  266. + uint32_t col_wrap_trigger_en : 1; // Bits 24:24
  267. + uint32_t row_wrap_trigger_en : 1; // Bits 25:25
  268. + uint32_t bank_wrap_trigger_en : 1; // Bits 26:26
  269. + uint32_t rank_wrap_trigger_en : 1; // Bits 27:27
  270. + uint32_t col_wrap_carry_en : 1; // Bits 28:28
  271. + uint32_t row_wrap_carry_en : 1; // Bits 29:29
  272. + uint32_t bank_wrap_carry_en : 1; // Bits 30:30
  273. + uint32_t rank_wrap_carry_en : 1; // Bits 31:31
  274. + };
  275. + uint32_t raw;
  276. +};
  277. +
  278. +union reut_seq_addr_inc_ctl_reg {
  279. + struct __packed {
  280. + uint32_t : 3; // Bits 2:0
  281. + uint32_t col_addr_increment : 8; // Bits 10:3
  282. + uint32_t : 1; // Bits 11:11
  283. + uint32_t col_addr_update : 8; // Bits 19:12
  284. + uint32_t row_addr_increment : 12; // Bits 31:20
  285. + uint32_t row_addr_update : 6; // Bits 37:32
  286. + uint32_t bank_addr_increment : 3; // Bits 40:38
  287. + uint32_t : 3; // Bits 43:41
288. + uint32_t bank_addr_update : 8; // Bits 51:44
  289. + uint32_t rank_addr_increment : 3; // Bits 54:52
  290. + uint32_t : 1; // Bits 55:55
  291. + uint32_t rank_addr_update : 8; // Bits 63:56
  292. + };
  293. + uint64_t raw;
  294. + uint32_t raw32[2];
  295. +};
  296. +
  297. union reut_seq_ctl_reg {
  298. struct __packed {
  299. uint32_t start_test : 1; // Bits 0:0
  300. diff --git a/src/northbridge/intel/haswell/native_raminit/testing_io.c b/src/northbridge/intel/haswell/native_raminit/testing_io.c
  301. new file mode 100644
  302. index 0000000000..2632c238f8
  303. --- /dev/null
  304. +++ b/src/northbridge/intel/haswell/native_raminit/testing_io.c
  305. @@ -0,0 +1,744 @@
  306. +/* SPDX-License-Identifier: GPL-2.0-or-later */
  307. +
  308. +#include <console/console.h>
  309. +#include <delay.h>
  310. +#include <lib.h>
  311. +#include <northbridge/intel/haswell/haswell.h>
  312. +#include <timer.h>
  313. +#include <types.h>
  314. +
  315. +#include "raminit_native.h"
  316. +
  317. +static void set_cadb_patterns(const uint8_t channel, const uint16_t seeds[NUM_CADB_MUX_SEEDS])
  318. +{
  319. + for (uint8_t i = 0; i < NUM_CADB_MUX_SEEDS; i++)
  320. + mchbar_write32(REUT_ch_PAT_CADB_MUX_x(channel, i), seeds[i]);
  321. +}
  322. +
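+/*
+ * Fill the CADB rows with a simple victim/aggressor pattern: within every group of
+ * vic_spread command/address bits, the lane selected by vic_bit toggles on a different
+ * cadence across the 8 rows than the remaining lanes. Then select the LFSR (or LMN)
+ * mux control and seed the CADB pattern muxes.
+ */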
  323. +static void setup_cadb(
  324. + struct sysinfo *ctrl,
  325. + const uint8_t channel,
  326. + const uint8_t vic_spread,
  327. + const uint8_t vic_bit)
  328. +{
  329. + const bool lmn_en = false;
  330. +
  331. + /*
  332. + * Currently, always start writing at CADB row 0.
  333. + * Could add a start point parameter in the future.
  334. + */
  335. + mchbar_write8(REUT_ch_PAT_CADB_WRITE_PTR(channel), 0);
  336. + const uint8_t num_cadb_rows = 8;
  337. + for (uint8_t row = 0; row < num_cadb_rows; row++) {
  338. + const uint8_t lfsr0 = (row >> 0) & 1;
  339. + const uint8_t lfsr1 = (row >> 1) & 1;
  340. + uint64_t reg64 = 0;
  341. + for (uint8_t bit = 0; bit < 22; bit++) {
  342. + uint8_t bremap;
  343. + if (bit >= 19) {
  344. + /* (bremap in 40 .. 42) => CADB data control */
  345. + bremap = bit + 21;
  346. + } else if (bit >= 16) {
  347. + /* (bremap in 24 .. 26) => CADB data bank */
  348. + bremap = bit + 8;
  349. + } else {
  350. + /* (bremap in 0 .. 15) => CADB data address */
  351. + bremap = bit;
  352. + }
  353. + const uint8_t fine = bit % vic_spread;
  354. + reg64 |= ((uint64_t)(fine == vic_bit ? lfsr0 : lfsr1)) << bremap;
  355. + }
  356. + /*
  357. + * Write row. CADB pointer is auto incremented after every write. This must be
  358. + * a single 64-bit write, otherwise the CADB pointer will auto-increment twice.
  359. + */
  360. + mchbar_write64(REUT_ch_PAT_CADB_PROG(channel), reg64);
  361. + }
  362. + const union reut_pat_cadb_mux_ctrl_reg cadb_mux_ctrl = {
  363. + .mux_0_ctrl = lmn_en ? REUT_MUX_LMN : REUT_MUX_LFSR,
  364. + .mux_1_ctrl = REUT_MUX_LFSR,
  365. + .mux_2_ctrl = REUT_MUX_LFSR,
  366. + };
  367. + mchbar_write32(REUT_ch_PAT_CADB_MUX_CTRL(channel), cadb_mux_ctrl.raw);
  368. + const union reut_pat_cl_mux_lmn_reg cadb_cl_mux_lmn = {
  369. + .en_sweep_freq = 1,
  370. + .l_counter = 1,
  371. + .m_counter = 1,
  372. + .n_counter = 6,
  373. + };
  374. + mchbar_write32(REUT_ch_PAT_CADB_CL_MUX_LMN(channel), cadb_cl_mux_lmn.raw);
  375. + const uint16_t cadb_mux_seeds[NUM_CADB_MUX_SEEDS] = { 0x0ea1, 0xbeef, 0xdead };
  376. + set_cadb_patterns(channel, cadb_mux_seeds);
  377. +}
  378. +
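+/*
+ * Encode a sequencer increment rate: rates up to 'lim' are programmed as a linear
+ * value with the scale bit set, larger rates as a base-2 exponent. For example,
+ * calc_rate(16, 31, 7) == (BIT(7) | 16), while calc_rate(64, 31, 7) == 6 (2^6 = 64).
+ */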
  379. +static uint32_t calc_rate(const uint32_t rate, const uint32_t lim, const uint8_t scale_bit)
  380. +{
  381. + return rate > lim ? log2_ceil(rate - 1) : BIT(scale_bit) | rate;
  382. +}
  383. +
  384. +void program_seq_addr(
  385. + const uint8_t channel,
  386. + const struct reut_box *reut_addr,
  387. + const bool log_seq_addr)
  388. +{
  389. + const int loglevel = log_seq_addr ? BIOS_ERR : BIOS_NEVER;
  390. + const uint32_t div = 8;
  391. + union reut_seq_base_addr_reg reut_seq_addr_start = {
  392. + .col_addr = reut_addr->col.start / div,
  393. + .row_addr = reut_addr->row.start,
  394. + .bank_addr = reut_addr->bank.start,
  395. + .rank_addr = reut_addr->rank.start,
  396. + };
  397. + mchbar_write64(REUT_ch_SEQ_ADDR_START(channel), reut_seq_addr_start.raw);
  398. + reut_seq_addr_start.raw = mchbar_read64(REUT_ch_SEQ_ADDR_START(channel));
  399. + printk(loglevel, "\tStart column: %u\n", reut_seq_addr_start.col_addr);
  400. + printk(loglevel, "\tStart row: %u\n", reut_seq_addr_start.row_addr);
  401. + printk(loglevel, "\tStart bank: %u\n", reut_seq_addr_start.bank_addr);
  402. + printk(loglevel, "\tStart rank: %u\n", reut_seq_addr_start.rank_addr);
  403. + printk(loglevel, "\n");
  404. +
  405. + union reut_seq_base_addr_reg reut_seq_addr_stop = {
  406. + .col_addr = reut_addr->col.stop / div,
  407. + .row_addr = reut_addr->row.stop,
  408. + .bank_addr = reut_addr->bank.stop,
  409. + .rank_addr = reut_addr->rank.stop,
  410. + };
  411. + mchbar_write64(REUT_ch_SEQ_ADDR_WRAP(channel), reut_seq_addr_stop.raw);
  412. + reut_seq_addr_stop.raw = mchbar_read64(REUT_ch_SEQ_ADDR_WRAP(channel));
  413. + printk(loglevel, "\tStop column: %u\n", reut_seq_addr_stop.col_addr);
  414. + printk(loglevel, "\tStop row: %u\n", reut_seq_addr_stop.row_addr);
  415. + printk(loglevel, "\tStop bank: %u\n", reut_seq_addr_stop.bank_addr);
  416. + printk(loglevel, "\tStop rank: %u\n", reut_seq_addr_stop.rank_addr);
  417. + printk(loglevel, "\n");
  418. +
  419. + union reut_seq_misc_ctl_reg reut_seq_misc_ctl = {
  420. + .col_wrap_trigger_en = reut_addr->col.wrap_trigger,
  421. + .row_wrap_trigger_en = reut_addr->row.wrap_trigger,
  422. + .bank_wrap_trigger_en = reut_addr->bank.wrap_trigger,
  423. + .rank_wrap_trigger_en = reut_addr->rank.wrap_trigger,
  424. + };
  425. + mchbar_write32(REUT_ch_SEQ_MISC_CTL(channel), reut_seq_misc_ctl.raw);
  426. + printk(loglevel, "\tWrap column: %u\n", reut_addr->col.wrap_trigger);
  427. + printk(loglevel, "\tWrap row: %u\n", reut_addr->row.wrap_trigger);
  428. + printk(loglevel, "\tWrap bank: %u\n", reut_addr->bank.wrap_trigger);
  429. + printk(loglevel, "\tWrap rank: %u\n", reut_addr->rank.wrap_trigger);
  430. + printk(loglevel, "\n");
  431. +
  432. + union reut_seq_addr_inc_ctl_reg reut_seq_addr_inc_ctl = {
  433. + .col_addr_update = calc_rate(reut_addr->col.inc_rate, 31, 7),
  434. + .row_addr_update = calc_rate(reut_addr->row.inc_rate, 15, 5),
  435. + .bank_addr_update = calc_rate(reut_addr->bank.inc_rate, 31, 7),
  436. + .rank_addr_update = calc_rate(reut_addr->rank.inc_rate, 31, 7),
  437. + .col_addr_increment = reut_addr->col.inc_val,
  438. + .row_addr_increment = reut_addr->row.inc_val,
  439. + .bank_addr_increment = reut_addr->bank.inc_val,
  440. + .rank_addr_increment = reut_addr->rank.inc_val,
  441. + };
  442. + printk(loglevel, "\tUpdRate column: %u\n", reut_addr->col.inc_rate);
  443. + printk(loglevel, "\tUpdRate row: %u\n", reut_addr->row.inc_rate);
  444. + printk(loglevel, "\tUpdRate bank: %u\n", reut_addr->bank.inc_rate);
  445. + printk(loglevel, "\tUpdRate rank: %u\n", reut_addr->rank.inc_rate);
  446. + printk(loglevel, "\n");
  447. + printk(loglevel, "\tUpdRateCR column: %u\n", reut_seq_addr_inc_ctl.col_addr_update);
  448. + printk(loglevel, "\tUpdRateCR row: %u\n", reut_seq_addr_inc_ctl.row_addr_update);
  449. + printk(loglevel, "\tUpdRateCR bank: %u\n", reut_seq_addr_inc_ctl.bank_addr_update);
  450. + printk(loglevel, "\tUpdRateCR rank: %u\n", reut_seq_addr_inc_ctl.rank_addr_update);
  451. + printk(loglevel, "\n");
  452. + printk(loglevel, "\tUpdInc column: %u\n", reut_seq_addr_inc_ctl.col_addr_increment);
  453. + printk(loglevel, "\tUpdInc row: %u\n", reut_seq_addr_inc_ctl.row_addr_increment);
  454. + printk(loglevel, "\tUpdInc bank: %u\n", reut_seq_addr_inc_ctl.bank_addr_increment);
  455. + printk(loglevel, "\tUpdInc rank: %u\n", reut_seq_addr_inc_ctl.rank_addr_increment);
  456. + printk(loglevel, "\n");
  457. + mchbar_write64(REUT_ch_SEQ_ADDR_INC_CTL(channel), reut_seq_addr_inc_ctl.raw);
  458. +}
  459. +
  460. +/*
461. + * Early steppings encode the loop count as a base-2 exponent inside the sequence
462. + * config register, whereas later steppings take a linear loop count in a separate register.
  463. + * Address the differences in register offset and format here.
  464. + */
  465. +void program_loop_count(const struct sysinfo *ctrl, const uint8_t channel, const uint8_t lc_exp)
  466. +{
  467. + if (ctrl->stepping >= STEPPING_C0) {
  468. + const uint32_t loopcount = lc_exp >= 32 ? 0 : BIT(lc_exp);
  469. + mchbar_write32(HSW_REUT_ch_SEQ_LOOP_COUNT(channel), loopcount);
  470. + } else {
  471. + const uint8_t loopcount = lc_exp >= 32 ? 0 : lc_exp + 1;
  472. + union reut_seq_cfg_reg reut_seq_cfg = {
  473. + .raw = mchbar_read64(REUT_ch_SEQ_CFG(channel)),
  474. + };
  475. + reut_seq_cfg.early_steppings_loop_count = loopcount;
  476. + mchbar_write64(REUT_ch_SEQ_CFG(channel), reut_seq_cfg.raw);
  477. + }
  478. +}
  479. +
  480. +static inline void write_subseq(const uint8_t channel, const uint8_t idx, const uint32_t ssq)
  481. +{
  482. + mchbar_write32(REUT_ch_SUBSEQ_x_CTL(channel, idx), ssq);
  483. +}
  484. +
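+/*
+ * Program the subsequence control registers for the selected command pattern.
+ * ss_a and ss_b already carry the cacheline count, wait time and reset-base flag;
+ * only the subsequence type (read/write/turnaround) is OR'd in here.
+ */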
  485. +static void program_subseq(
  486. + struct sysinfo *const ctrl,
  487. + const uint8_t channel,
  488. + const enum reut_cmd_pat cmd_pat,
  489. + const uint32_t ss_a,
  490. + const uint32_t ss_b)
  491. +{
  492. + switch (cmd_pat) {
  493. + case PAT_WR_RD_TA:
  494. + write_subseq(channel, 0, ss_a | SUBSEQ_B_WR);
  495. + for (uint8_t i = 1; i < 7; i++)
  496. + write_subseq(channel, i, ss_b | SUBSEQ_B_RD_WR);
  497. +
  498. + write_subseq(channel, 7, ss_a | SUBSEQ_B_RD);
  499. + break;
  500. + case PAT_RD_WR_TA:
  501. + write_subseq(channel, 0, ss_b | SUBSEQ_B_WR_RD);
  502. + break;
  503. + case PAT_ODT_TA:
  504. + write_subseq(channel, 0, ss_a | SUBSEQ_B_WR);
  505. + write_subseq(channel, 1, ss_b | SUBSEQ_B_RD_WR);
  506. + write_subseq(channel, 2, ss_a | SUBSEQ_B_RD);
  507. + write_subseq(channel, 3, ss_b | SUBSEQ_B_WR_RD);
  508. + break;
  509. + default:
  510. + write_subseq(channel, 0, ss_a | SUBSEQ_B_WR);
  511. + write_subseq(channel, 1, ss_a | SUBSEQ_B_RD);
  512. + break;
  513. + }
  514. +}
  515. +
  516. +void setup_io_test(
  517. + struct sysinfo *ctrl,
  518. + const uint8_t chanmask,
  519. + const enum reut_cmd_pat cmd_pat,
  520. + const uint16_t num_cl,
  521. + const uint8_t lc,
  522. + const struct reut_box *const reut_addr,
  523. + const enum test_stop soe,
  524. + const struct wdb_pat *const pat,
  525. + const uint8_t en_cadb,
  526. + const uint8_t subseq_wait)
  527. +{
  528. + if (!chanmask) {
  529. + printk(BIOS_ERR, "\n%s: chanmask is invalid\n", __func__);
  530. + return;
  531. + }
  532. +
  533. + /*
  534. + * Prepare variables needed for both channels.
535. + * Check for the cases where the loop count MUST be 1: when we
536. + * manually walk through the ODT and write turnaround subsequences.
  537. + */
  538. + uint8_t lc_exp = MAX(lc - log2_ceil(num_cl), 0);
  539. + if (cmd_pat == PAT_WR_RD_TA || cmd_pat == PAT_ODT_TA)
  540. + lc_exp = 0;
  541. +
  542. + uint8_t num_clcr;
  543. + if (num_cl > 127) {
  544. + /* Assume exponential number */
  545. + num_clcr = log2_ceil(num_cl);
  546. + } else {
  547. + /* Set number of cache lines as linear number */
  548. + num_clcr = num_cl | BIT(7);
  549. + }
  550. +
  551. + const uint16_t num_cl2 = 2 * num_cl;
  552. + uint8_t num_cl2cr;
  553. + if (num_cl2 > 127) {
  554. + /* Assume exponential number */
  555. + num_cl2cr = log2_ceil(num_cl2);
  556. + } else {
  557. + /* Set number of cache lines as linear number */
  558. + num_cl2cr = num_cl2 | BIT(7);
  559. + }
  560. +
  561. + for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
  562. + if (!(chanmask & BIT(channel))) {
  563. + union reut_seq_cfg_reg reut_seq_cfg = {
  564. + .raw = mchbar_read64(REUT_ch_SEQ_CFG(channel)),
  565. + };
  566. + reut_seq_cfg.global_control = 0;
  567. + mchbar_write64(REUT_ch_SEQ_CFG(channel), reut_seq_cfg.raw);
  568. + continue;
  569. + }
  570. +
  571. + /*
  572. + * Program CADB
  573. + */
  574. + mchbar_write8(REUT_ch_MISC_PAT_CADB_CTRL(channel), !!en_cadb);
  575. + if (en_cadb)
  576. + setup_cadb(ctrl, channel, 7, 8);
  577. +
  578. + /*
  579. + * Program sequence
  580. + */
  581. + uint8_t subseq_start = 0;
  582. + uint8_t subseq_end = 0;
  583. + switch (cmd_pat) {
  584. + case PAT_WR_RD:
  585. + subseq_end = 1;
  586. + break;
  587. + case PAT_WR:
  588. + break;
  589. + case PAT_RD:
  590. + subseq_start = 1;
  591. + subseq_end = 1;
  592. + break;
  593. + case PAT_RD_WR_TA:
  594. + break;
  595. + case PAT_WR_RD_TA:
  596. + subseq_end = 7;
  597. + break;
  598. + case PAT_ODT_TA:
  599. + subseq_end = 3;
  600. + break;
  601. + default:
  602. + die("\n%s: Pattern type %u is invalid\n", __func__, cmd_pat);
  603. + }
  604. + const union reut_seq_cfg_reg reut_seq_cfg = {
  605. + .global_control = 1,
  606. + .initialization_mode = REUT_MODE_TEST,
  607. + .subsequence_start_pointer = subseq_start,
  608. + .subsequence_end_pointer = subseq_end,
  609. + .start_test_delay = 2,
  610. + };
  611. + mchbar_write64(REUT_ch_SEQ_CFG(channel), reut_seq_cfg.raw);
  612. + program_loop_count(ctrl, channel, lc_exp);
  613. + mchbar_write32(REUT_ch_SEQ_CTL(channel), (union reut_seq_ctl_reg) {
  614. + .clear_errors = 1,
  615. + }.raw);
  616. +
  617. + /*
  618. + * Program subsequences
  619. + */
  620. + uint32_t subseq_a = 0;
  621. +
  622. + /* Number of cachelines and scale */
  623. + subseq_a |= (num_clcr & 0x00ff) << 0;
  624. + subseq_a |= (subseq_wait & 0x3fff) << 8;
  625. +
  626. + /* Reset current base address to start */
  627. + subseq_a |= BIT(27);
  628. +
  629. + uint32_t subseq_b = 0;
  630. +
  631. + /* Number of cachelines and scale */
  632. + subseq_b |= (num_cl2cr & 0x00ff) << 0;
  633. + subseq_b |= (subseq_wait & 0x3fff) << 8;
  634. +
  635. + /* Reset current base address to start */
  636. + subseq_b |= BIT(27);
  637. +
  638. + program_subseq(ctrl, channel, cmd_pat, subseq_a, subseq_b);
  639. +
  640. + /* Program sequence address */
  641. + program_seq_addr(channel, reut_addr, false);
  642. +
  643. + /* Program WDB */
  644. + const bool is_linear = pat->inc_rate < 32;
  645. + mchbar_write32(REUT_ch_WDB_CL_CTRL(channel), (union reut_pat_wdb_cl_ctrl_reg) {
  646. + .start_ptr = pat->start_ptr,
  647. + .end_ptr = pat->stop_ptr,
  648. + .inc_rate = is_linear ? pat->inc_rate : log2_ceil(pat->inc_rate),
  649. + .inc_scale = is_linear,
  650. + }.raw);
  651. +
  652. + /* Enable LMN in LMN or CADB modes, used to create lots of supply noise */
  653. + const bool use_lmn = pat->dq_pattern == LMN_VA || pat->dq_pattern == CADB;
  654. + union reut_pat_wdb_cl_mux_cfg_reg pat_wdb_cl_mux_cfg = {
  655. + .mux_0_control = use_lmn ? REUT_MUX_LMN : REUT_MUX_LFSR,
  656. + .mux_1_control = REUT_MUX_LFSR,
  657. + .mux_2_control = REUT_MUX_LFSR,
  658. + .ecc_data_source_sel = 1,
  659. + };
  660. +
  661. + /* Program LFSR save/restore, too complex unless everything is power of 2 */
  662. + if (cmd_pat == PAT_ODT_TA || cmd_pat == PAT_WR_RD_TA) {
  663. + pat_wdb_cl_mux_cfg.reload_lfsr_seed_rate = log2_ceil(num_cl) + 1;
  664. + pat_wdb_cl_mux_cfg.save_lfsr_seed_rate = 1;
  665. + }
  666. + mchbar_write32(REUT_ch_PAT_WDB_CL_MUX_CFG(channel), pat_wdb_cl_mux_cfg.raw);
  667. +
  668. + /* Inversion mask is not used */
  669. + mchbar_write32(REUT_ch_PAT_WDB_INV(channel), 0);
  670. +
  671. + /* Program error checking */
  672. + const union reut_err_ctl_reg reut_err_ctl = {
  673. + .selective_err_enable_cacheline = 0xff,
  674. + .selective_err_enable_chunk = 0xff,
  675. + .stop_on_error_control = soe,
  676. + .stop_on_nth_error = 1,
  677. + };
  678. + mchbar_write32(REUT_ch_ERR_CONTROL(channel), reut_err_ctl.raw);
  679. + mchbar_write64(REUT_ch_ERR_DATA_MASK(channel), 0);
  680. + mchbar_write8(REUT_ch_ERR_ECC_MASK(channel), 0);
  681. + }
  682. +
  683. + /* Always do a ZQ short before the beginning of a test */
  684. + reut_issue_zq(ctrl, chanmask, ZQ_SHORT);
  685. +}
  686. +
  687. +void setup_io_test_cadb(
  688. + struct sysinfo *ctrl,
  689. + const uint8_t chanmask,
  690. + const uint8_t lc,
  691. + const enum test_stop soe)
  692. +{
  693. + const struct reut_box reut_addr = {
  694. + .rank = {
  695. + .start = 0,
  696. + .stop = 0,
  697. + .inc_rate = 32,
  698. + .inc_val = 1,
  699. + },
  700. + .bank = {
  701. + .start = 0,
  702. + .stop = 7,
  703. + .inc_rate = 3,
  704. + .inc_val = 1,
  705. + },
  706. + .row = {
  707. + .start = 0,
  708. + .stop = 2047,
  709. + .inc_rate = 3,
  710. + .inc_val = 73,
  711. + },
  712. + .col = {
  713. + .start = 0,
  714. + .stop = 1023,
  715. + .inc_rate = 0,
  716. + .inc_val = 53,
  717. + },
  718. + };
  719. + const struct wdb_pat pattern = {
  720. + .start_ptr = 0,
  721. + .stop_ptr = 9,
  722. + .inc_rate = 4,
  723. + .dq_pattern = CADB,
  724. + };
  725. + setup_io_test(
  726. + ctrl,
  727. + chanmask,
  728. + PAT_WR_RD,
  729. + 128,
  730. + lc,
  731. + &reut_addr,
  732. + soe,
  733. + &pattern,
  734. + 1,
  735. + 0);
  736. +
  737. + ctrl->dq_pat_lc = MAX(lc - 2 - 3, 0) + 1;
  738. + ctrl->dq_pat = CADB;
  739. +}
  740. +
  741. +void setup_io_test_basic_va(
  742. + struct sysinfo *ctrl,
  743. + const uint8_t chanmask,
  744. + const uint8_t lc,
  745. + const enum test_stop soe)
  746. +{
  747. + const uint32_t spread = 8;
  748. + const struct reut_box reut_addr = {
  749. + .rank = {
  750. + .start = 0,
  751. + .stop = 0,
  752. + .inc_rate = 32,
  753. + .inc_val = 1,
  754. + },
  755. + .col = {
  756. + .start = 0,
  757. + .stop = 1023,
  758. + .inc_rate = 0,
  759. + .inc_val = 1,
  760. + },
  761. + };
  762. + const struct wdb_pat pattern = {
  763. + .start_ptr = 0,
  764. + .stop_ptr = spread - 1,
  765. + .inc_rate = 4,
  766. + .dq_pattern = BASIC_VA,
  767. + };
  768. + setup_io_test(
  769. + ctrl,
  770. + chanmask,
  771. + PAT_WR_RD,
  772. + 128,
  773. + lc,
  774. + &reut_addr,
  775. + soe,
  776. + &pattern,
  777. + 0,
  778. + 0);
  779. +
  780. + ctrl->dq_pat_lc = MAX(lc - 8, 0) + 1;
  781. + ctrl->dq_pat = BASIC_VA;
  782. +}
  783. +
  784. +void setup_io_test_mpr(
  785. + struct sysinfo *ctrl,
  786. + const uint8_t chanmask,
  787. + const uint8_t lc,
  788. + const enum test_stop soe)
  789. +{
  790. + const struct reut_box reut_addr_ddr = {
  791. + .rank = {
  792. + .start = 0,
  793. + .stop = 0,
  794. + .inc_rate = 32,
  795. + .inc_val = 1,
  796. + },
  797. + .col = {
  798. + .start = 0,
  799. + .stop = 1023,
  800. + .inc_rate = 0,
  801. + .inc_val = 1,
  802. + },
  803. + };
  804. + const struct reut_box reut_addr_lpddr = {
  805. + .bank = {
  806. + .start = 4,
  807. + .stop = 4,
  808. + .inc_rate = 0,
  809. + .inc_val = 0,
  810. + },
  811. + };
  812. + const struct wdb_pat pattern = {
  813. + .start_ptr = 0,
  814. + .stop_ptr = 9,
  815. + .inc_rate = 4,
  816. + .dq_pattern = BASIC_VA,
  817. + };
  818. + setup_io_test(
  819. + ctrl,
  820. + chanmask,
  821. + PAT_RD,
  822. + 128,
  823. + lc,
  824. + ctrl->lpddr ? &reut_addr_lpddr : &reut_addr_ddr,
  825. + soe,
  826. + &pattern,
  827. + 0,
  828. + 0);
  829. +
  830. + ctrl->dq_pat_lc = 1;
  831. + ctrl->dq_pat = BASIC_VA;
  832. +}
  833. +
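+/*
+ * Select which ranks the REUT engine uses on this channel: program the logical to
+ * physical rank mapping, update the rank wrap address and enable or disable the
+ * channel in the sequence config. Returns BIT(channel) if any rank was selected,
+ * 0 otherwise.
+ */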
  834. +uint8_t select_reut_ranks(struct sysinfo *ctrl, const uint8_t channel, uint8_t rankmask)
  835. +{
  836. + rankmask &= ctrl->rankmap[channel];
  837. +
  838. + uint8_t rank_count = 0;
  839. + uint32_t rank_log_to_phys = 0;
  840. + for (uint8_t rank = 0; rank < NUM_SLOTRANKS; rank++) {
  841. + if (!rank_in_mask(rank, rankmask))
  842. + continue;
  843. +
  844. + rank_log_to_phys |= rank << (4 * rank_count);
  845. + rank_count++;
  846. + }
  847. + mchbar_write32(REUT_ch_RANK_LOG_TO_PHYS(channel), rank_log_to_phys);
  848. +
  849. + union reut_seq_cfg_reg reut_seq_cfg = {
  850. + .raw = mchbar_read64(REUT_ch_SEQ_CFG(channel)),
  851. + };
  852. + if (!rank_count) {
  853. + reut_seq_cfg.global_control = 0;
  854. + mchbar_write64(REUT_ch_SEQ_CFG(channel), reut_seq_cfg.raw);
  855. + return 0;
  856. + }
  857. + union reut_seq_base_addr_reg reut_seq_addr_stop = {
  858. + .raw = mchbar_read64(REUT_ch_SEQ_ADDR_WRAP(channel)),
  859. + };
  860. + reut_seq_addr_stop.rank_addr = rank_count - 1;
  861. + mchbar_write64(REUT_ch_SEQ_ADDR_WRAP(channel), reut_seq_addr_stop.raw);
  862. +
  863. + reut_seq_cfg.global_control = 1;
  864. + mchbar_write64(REUT_ch_SEQ_CFG(channel), reut_seq_cfg.raw);
  865. + return BIT(channel);
  866. +}
  867. +
  868. +void run_mpr_io_test(const bool clear_errors)
  869. +{
  870. + io_reset();
  871. + mchbar_write32(REUT_GLOBAL_CTL, (union reut_seq_ctl_reg) {
  872. + .start_test = 1,
  873. + .clear_errors = clear_errors,
  874. + }.raw);
  875. + tick_delay(2);
  876. + io_reset();
  877. + tick_delay(2);
  878. + mchbar_write32(REUT_GLOBAL_CTL, (union reut_seq_ctl_reg) {
  879. + .stop_test = 1,
  880. + }.raw);
  881. +}
  882. +
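+/* Some DQ patterns need several back-to-back REUT runs with per-test adjustments */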
  883. +static uint8_t get_num_tests(const uint8_t dq_pat)
  884. +{
  885. + switch (dq_pat) {
  886. + case SEGMENT_WDB: return 4;
  887. + case CADB: return 7;
  888. + case TURN_AROUND_WR: return 8;
  889. + case TURN_AROUND_ODT: return 4;
  890. + case RD_RD_TA: return 2;
  891. + case RD_RD_TA_ALL: return 8;
  892. + default: return 1;
  893. + }
  894. +}
  895. +
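+/*
+ * Run the programmed I/O test on all channels in chanmask. Depending on the DQ
+ * pattern, multiple tests are run with per-test adjustments (WDB pointers, CADB
+ * victim bit, subsequence pointers or turnaround timings). Returns a bitmask of
+ * channels that reported errors.
+ */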
  896. +uint8_t run_io_test(
  897. + struct sysinfo *const ctrl,
  898. + const uint8_t chanmask,
  899. + const uint8_t dq_pat,
  900. + const bool clear_errors)
  901. +{
  902. + /* SEGMENT_WDB only runs 4 tests */
  903. + const uint8_t segment_wdb_lc[4] = { 0, 0, 4, 2 };
  904. + const union reut_pat_wdb_cl_ctrl_reg pat_wdb_cl[4] = {
  905. + [0] = {
  906. + .start_ptr = 0,
  907. + .end_ptr = 9,
  908. + .inc_rate = 25,
  909. + .inc_scale = SCALE_LINEAR,
  910. + },
  911. + [1] = {
  912. + .start_ptr = 0,
  913. + .end_ptr = 9,
  914. + .inc_rate = 25,
  915. + .inc_scale = SCALE_LINEAR,
  916. + },
  917. + [2] = {
  918. + .start_ptr = 10,
  919. + .end_ptr = 63,
  920. + .inc_rate = 19,
  921. + .inc_scale = SCALE_LINEAR,
  922. + },
  923. + [3] = {
  924. + .start_ptr = 10,
  925. + .end_ptr = 63,
  926. + .inc_rate = 10,
  927. + .inc_scale = SCALE_LINEAR,
  928. + },
  929. + };
  930. + const bool is_turnaround = dq_pat == RD_RD_TA || dq_pat == RD_RD_TA_ALL;
  931. + const uint8_t num_tests = get_num_tests(dq_pat);
  932. + union tc_bank_rank_a_reg tc_bank_rank_a[NUM_CHANNELS] = { 0 };
  933. + if (is_turnaround) {
  934. + for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
  935. + if (!(chanmask & BIT(channel)))
  936. + continue;
  937. +
  938. + tc_bank_rank_a[channel].raw = ctrl->tc_bankrank_a[channel].raw;
  939. + }
  940. + }
  941. + for (uint8_t t = 0; t < num_tests; t++) {
  942. + for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
  943. + if (!(chanmask & BIT(channel)))
  944. + continue;
  945. +
  946. + if (dq_pat == SEGMENT_WDB) {
  947. + mchbar_write32(REUT_ch_WDB_CL_CTRL(channel), pat_wdb_cl[t].raw);
  948. + /*
  949. + * Skip programming LFSR save/restore. Too complex
  950. + * unless power of 2. Program desired loopcount.
  951. + */
  952. + const uint8_t pat_lc = ctrl->dq_pat_lc + segment_wdb_lc[t];
  953. + program_loop_count(ctrl, channel, pat_lc);
  954. + } else if (dq_pat == CADB) {
  955. + setup_cadb(ctrl, channel, num_tests, t);
  956. + } else if (dq_pat == TURN_AROUND_WR || dq_pat == TURN_AROUND_ODT) {
  957. + union reut_seq_cfg_reg reut_seq_cfg = {
  958. + .raw = mchbar_read64(REUT_ch_SEQ_CFG(channel)),
  959. + };
  960. + reut_seq_cfg.subsequence_start_pointer = t;
  961. + reut_seq_cfg.subsequence_end_pointer = t;
  962. + mchbar_write64(REUT_ch_SEQ_CFG(channel), reut_seq_cfg.raw);
  963. + union reut_seq_addr_inc_ctl_reg addr_inc_ctl = {
  964. + .raw = mchbar_read64(REUT_ch_SEQ_ADDR_INC_CTL(channel)),
  965. + };
  966. + uint8_t ta_inc_rate = 1;
  967. + if (dq_pat == TURN_AROUND_WR && (t == 0 || t == 7))
  968. + ta_inc_rate = 0;
  969. + else if (dq_pat == TURN_AROUND_ODT && (t == 0 || t == 2))
  970. + ta_inc_rate = 0;
  971. +
  972. + /* Program increment rate as linear value */
  973. + addr_inc_ctl.rank_addr_update = BIT(7) | ta_inc_rate;
  974. + addr_inc_ctl.col_addr_update = BIT(7) | ta_inc_rate;
  975. + mchbar_write64(REUT_ch_SEQ_ADDR_INC_CTL(channel),
  976. + addr_inc_ctl.raw);
  977. + } else if (dq_pat == RD_RD_TA) {
  978. + tc_bank_rank_a[channel].tRDRD_sr = (t == 0) ? 4 : 5;
  979. + mchbar_write32(TC_BANK_RANK_A_ch(channel),
  980. + tc_bank_rank_a[channel].raw);
  981. + } else if (dq_pat == RD_RD_TA_ALL) {
  982. + /*
  983. + * Program tRDRD for SR and DR. Run 8 tests, covering
  984. + * tRDRD_sr = 4, 5, 6, 7 and tRDRD_dr = min, +1, +2, +3
  985. + */
  986. + const uint32_t tRDRD_dr = ctrl->tc_bankrank_a[channel].tRDRD_dr;
  987. + tc_bank_rank_a[channel].tRDRD_sr = (t % 4) + 4;
  988. + tc_bank_rank_a[channel].tRDRD_dr = (t % 4) + tRDRD_dr;
  989. + mchbar_write32(TC_BANK_RANK_A_ch(channel),
  990. + tc_bank_rank_a[channel].raw);
  991. +
  992. + /* Program linear rank increment rate */
  993. + union reut_seq_addr_inc_ctl_reg addr_inc_ctl = {
  994. + .raw = mchbar_read64(REUT_ch_SEQ_ADDR_INC_CTL(channel)),
  995. + };
996. + addr_inc_ctl.rank_addr_update = BIT(7) | (t / 4 ? 0 : 31);
  997. + mchbar_write64(REUT_ch_SEQ_ADDR_INC_CTL(channel),
  998. + addr_inc_ctl.raw);
  999. + }
  1000. + }
  1001. + bool test_soe = false;
  1002. + for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
  1003. + if (!(chanmask & BIT(channel)))
  1004. + continue;
  1005. +
  1006. + const union reut_err_ctl_reg reut_err_ctl = {
  1007. + .raw = mchbar_read32(REUT_ch_ERR_CONTROL(channel)),
  1008. + };
  1009. + const uint8_t soe = reut_err_ctl.stop_on_error_control;
  1010. + if (soe != NSOE) {
  1011. + test_soe = true;
  1012. + break;
  1013. + }
  1014. + }
  1015. + io_reset();
  1016. + mchbar_write32(REUT_GLOBAL_CTL, (union reut_seq_ctl_reg) {
  1017. + .start_test = 1,
  1018. + .clear_errors = clear_errors && t == 0,
  1019. + }.raw);
  1020. + struct mono_time prev, curr;
  1021. + timer_monotonic_get(&prev);
  1022. + union reut_global_err_reg global_err;
  1023. + do {
  1024. + global_err.raw = mchbar_read32(REUT_GLOBAL_ERR);
  1025. + /** TODO: Clean up this mess **/
  1026. + timer_monotonic_get(&curr);
  1027. + if (mono_time_diff_microseconds(&prev, &curr) > 1000 * 1000) {
  1028. + mchbar_write32(REUT_GLOBAL_CTL, (union reut_seq_ctl_reg) {
  1029. + .stop_test = 1,
  1030. + }.raw);
  1031. + printk(BIOS_ERR, "REUT timed out, ch_done: %x\n",
  1032. + global_err.ch_test_done);
  1033. + break;
  1034. + }
  1035. + } while ((global_err.ch_test_done & chanmask) != chanmask);
  1036. + if (test_soe && global_err.ch_error & chanmask)
  1037. + break;
  1038. + }
  1039. + if (is_turnaround) {
  1040. + for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
  1041. + if (!(chanmask & BIT(channel)))
  1042. + continue;
  1043. +
  1044. + mchbar_write32(TC_BANK_RANK_A_ch(channel),
  1045. + ctrl->tc_bankrank_a[channel].raw);
  1046. + }
  1047. + }
  1048. + return ((union reut_global_err_reg)mchbar_read32(REUT_GLOBAL_ERR)).ch_error;
  1049. +}
  1050. diff --git a/src/northbridge/intel/haswell/registers/mchbar.h b/src/northbridge/intel/haswell/registers/mchbar.h
  1051. index f8408e51a0..817a9f8bf8 100644
  1052. --- a/src/northbridge/intel/haswell/registers/mchbar.h
  1053. +++ b/src/northbridge/intel/haswell/registers/mchbar.h
  1054. @@ -94,20 +94,35 @@
  1055. #define TC_BANK_RANK_D_ch(ch) _MCMAIN_C(0x4014, ch)
  1056. #define SC_ROUNDT_LAT_ch(ch) _MCMAIN_C(0x4024, ch)
  1057. +#define REUT_ch_PAT_WDB_CL_MUX_CFG(ch) _MCMAIN_C(0x4040, ch)
  1058. +
  1059. #define REUT_ch_PAT_WDB_CL_MUX_WR_x(ch, x) _MCMAIN_C_X(0x4048, ch, x) /* x in 0 .. 2 */
  1060. #define REUT_ch_PAT_WDB_CL_MUX_RD_x(ch, x) _MCMAIN_C_X(0x4054, ch, x) /* x in 0 .. 2 */
  1061. #define REUT_ch_PAT_WDB_CL_MUX_LMN(ch) _MCMAIN_C(0x4078, ch)
  1062. +#define REUT_ch_PAT_WDB_INV(ch) _MCMAIN_C(0x4084, ch)
  1063. +
  1064. +#define REUT_ch_ERR_CONTROL(ch) _MCMAIN_C(0x4098, ch)
  1065. +#define REUT_ch_ERR_ECC_MASK(ch) _MCMAIN_C(0x409c, ch)
  1066. +
  1067. #define SC_WR_ADD_DELAY_ch(ch) _MCMAIN_C(0x40d0, ch)
  1068. +#define REUT_ch_ERR_DATA_MASK(ch) _MCMAIN_C(0x40d8, ch)
  1069. +
  1070. #define REUT_ch_MISC_CKE_CTRL(ch) _MCMAIN_C(0x4190, ch)
  1071. +#define REUT_ch_MISC_PAT_CADB_CTRL(ch) _MCMAIN_C(0x4198, ch)
  1072. #define REUT_ch_PAT_CADB_MRS(ch) _MCMAIN_C(0x419c, ch)
  1073. +#define REUT_ch_PAT_CADB_MUX_CTRL(ch) _MCMAIN_C(0x41a0, ch)
  1074. +#define REUT_ch_PAT_CADB_MUX_x(ch, x) _MCMAIN_C_X(0x41a4, ch, x) /* x in 0 .. 2 */
  1075. +#define REUT_ch_PAT_CADB_CL_MUX_LMN(ch) _MCMAIN_C(0x41b0, ch)
  1076. #define REUT_ch_PAT_CADB_WRITE_PTR(ch) _MCMAIN_C(0x41bc, ch)
  1077. #define REUT_ch_PAT_CADB_PROG(ch) _MCMAIN_C(0x41c0, ch)
  1078. +#define REUT_ch_WDB_CL_CTRL(ch) _MCMAIN_C(0x4200, ch)
  1079. +
  1080. #define TC_ZQCAL_ch(ch) _MCMAIN_C(0x4290, ch)
  1081. #define TC_RFP_ch(ch) _MCMAIN_C(0x4294, ch)
  1082. #define TC_RFTP_ch(ch) _MCMAIN_C(0x4298, ch)
  1083. @@ -119,12 +134,27 @@
  1084. #define QCLK_ch_LDAT_SDAT(ch) _MCMAIN_C(0x42d4, ch)
  1085. #define QCLK_ch_LDAT_DATA_IN_x(ch, x) _MCMAIN_C_X(0x42dc, ch, x) /* x in 0 .. 1 */
  1086. +#define REUT_GLOBAL_CTL 0x4800
  1087. #define REUT_GLOBAL_ERR 0x4804
  1088. +#define REUT_ch_SUBSEQ_x_CTL(ch, x) (0x4808 + 40 * (ch) + 4 * (x))
  1089. +
  1090. #define REUT_ch_SEQ_CFG(ch) (0x48a8 + 8 * (ch))
  1091. #define REUT_ch_SEQ_CTL(ch) (0x48b8 + 4 * (ch))
  1092. +#define REUT_ch_SEQ_ADDR_START(ch) (0x48d8 + 8 * (ch))
  1093. +
  1094. +#define REUT_ch_SEQ_ADDR_WRAP(ch) (0x48e8 + 8 * (ch))
  1095. +
  1096. +#define REUT_ch_SEQ_MISC_CTL(ch) (0x4908 + 4 * (ch))
  1097. +
  1098. +#define REUT_ch_SEQ_ADDR_INC_CTL(ch) (0x4910 + 8 * (ch))
  1099. +
  1100. +#define REUT_ch_RANK_LOG_TO_PHYS(ch) (0x4930 + 4 * (ch)) /* 4 bits per rank */
  1101. +
  1102. +#define HSW_REUT_ch_SEQ_LOOP_COUNT(ch) (0x4980 + 4 * (ch)) /* *** only on C0 *** */
  1103. +
  1104. /* MCMAIN broadcast */
  1105. #define MCSCHEDS_CBIT 0x4c20
  1106. --
  1107. 2.39.2