0025-haswell-NRI-Add-final-raminit-steps.patch

From d041b14f3af69db5f4598c84e3f53c9cd572ffb5 Mon Sep 17 00:00:00 2001
From: Angel Pons <th3fanbus@gmail.com>
Date: Sun, 8 May 2022 14:29:05 +0200
Subject: [PATCH 25/26] haswell NRI: Add final raminit steps

Implement the remaining raminit steps. Although many training steps are
missing, this is enough to boot on the Asrock B85M Pro4.

Change-Id: I94f3b65f0218d4da4fda4d84592dfd91f77f8f21
Signed-off-by: Angel Pons <th3fanbus@gmail.com>
---
 src/northbridge/intel/haswell/Kconfig         |   4 +-
 .../intel/haswell/native_raminit/Makefile.inc |   1 +
 .../haswell/native_raminit/activate_mc.c      | 388 ++++++++++++++++++
 .../haswell/native_raminit/raminit_main.c     |   5 +-
 .../haswell/native_raminit/raminit_native.c   |   5 +-
 .../haswell/native_raminit/raminit_native.h   |   2 +
 .../haswell/native_raminit/reg_structs.h      |  12 +
 .../intel/haswell/registers/mchbar.h          |   7 +
 8 files changed, 416 insertions(+), 8 deletions(-)
 create mode 100644 src/northbridge/intel/haswell/native_raminit/activate_mc.c
diff --git a/src/northbridge/intel/haswell/Kconfig b/src/northbridge/intel/haswell/Kconfig
index b659bf6d98..61f2a3c64c 100644
--- a/src/northbridge/intel/haswell/Kconfig
+++ b/src/northbridge/intel/haswell/Kconfig
@@ -10,12 +10,12 @@ config NORTHBRIDGE_INTEL_HASWELL
 if NORTHBRIDGE_INTEL_HASWELL
 
 config USE_NATIVE_RAMINIT
-	bool "[NOT WORKING] Use native raminit"
+	bool "[NOT COMPLETE] Use native raminit"
 	default n
 	select HAVE_DEBUG_RAM_SETUP
 	help
 	  Select if you want to use coreboot implementation of raminit rather than
-	  MRC.bin. Currently incomplete and does not boot.
+	  MRC.bin. Currently incomplete and does not support S3 resume.
 
 config HASWELL_VBOOT_IN_BOOTBLOCK
 	depends on VBOOT
diff --git a/src/northbridge/intel/haswell/native_raminit/Makefile.inc b/src/northbridge/intel/haswell/native_raminit/Makefile.inc
index 40c2f5e014..d97da72890 100644
--- a/src/northbridge/intel/haswell/native_raminit/Makefile.inc
+++ b/src/northbridge/intel/haswell/native_raminit/Makefile.inc
@@ -1,5 +1,6 @@
 ## SPDX-License-Identifier: GPL-2.0-or-later
 
+romstage-y += activate_mc.c
 romstage-y += change_margin.c
 romstage-y += configure_mc.c
 romstage-y += ddr3.c
diff --git a/src/northbridge/intel/haswell/native_raminit/activate_mc.c b/src/northbridge/intel/haswell/native_raminit/activate_mc.c
new file mode 100644
index 0000000000..78a7ad27ef
--- /dev/null
+++ b/src/northbridge/intel/haswell/native_raminit/activate_mc.c
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <console/console.h>
+#include <delay.h>
+#include <device/pci_ops.h>
+#include <northbridge/intel/haswell/haswell.h>
+#include <timer.h>
+#include <types.h>
+
+#include "raminit_native.h"
+
+static void update_internal_clocks_on(struct sysinfo *ctrl)
+{
+	for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+		if (!does_ch_exist(ctrl, channel))
+			continue;
+
+		bool clocks_on = false;
+		for (uint8_t byte = 0; byte < ctrl->lanes; byte++) {
+			const union ddr_data_control_1_reg data_control_1 = {
+				.raw = ctrl->dq_control_1[channel][byte],
+			};
+			const int8_t o_on = data_control_1.odt_delay;
+			const int8_t s_on = data_control_1.sense_amp_delay;
+			const int8_t o_off = data_control_1.odt_duration;
+			const int8_t s_off = data_control_1.sense_amp_duration;
+			if (o_on + o_off >= 7 || s_on + s_off >= 7) {
+				clocks_on = true;
+				break;
+			}
+		}
+		union ddr_data_control_0_reg data_control_0 = {
+			.raw = ctrl->dq_control_0[channel],
+		};
+		data_control_0.internal_clocks_on = clocks_on;
+		ctrl->dq_control_0[channel] = data_control_0.raw;
+		mchbar_write32(DDR_DATA_ch_CONTROL_0(channel), data_control_0.raw);
+	}
+}
+
+/* Switch off unused segments of the SDLL to save power */
+static void update_sdll_length(struct sysinfo *ctrl)
+{
+	for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+		if (!does_ch_exist(ctrl, channel))
+			continue;
+
+		for (uint8_t byte = 0; byte < ctrl->lanes; byte++) {
+			uint8_t max_pi = 0;
+			for (uint8_t rank = 0; rank < NUM_SLOTRANKS; rank++) {
+				if (!rank_in_ch(ctrl, rank, channel))
+					continue;
+
+				const uint8_t rx_dqs_p = ctrl->rxdqsp[channel][rank][byte];
+				const uint8_t rx_dqs_n = ctrl->rxdqsn[channel][rank][byte];
+				max_pi = MAX(max_pi, MAX(rx_dqs_p, rx_dqs_n));
+			}
+			/* Update SDLL length for power savings */
+			union ddr_data_control_1_reg data_control_1 = {
+				.raw = ctrl->dq_control_1[channel][byte],
+			};
+			/* Calculate which segments to turn off */
+			data_control_1.sdll_segment_disable = (7 - (max_pi >> 3)) & ~1;
+			ctrl->dq_control_1[channel][byte] = data_control_1.raw;
+			mchbar_write32(DQ_CONTROL_1(channel, byte), data_control_1.raw);
+		}
+	}
+}
+
+static void set_rx_clk_stg_num(struct sysinfo *ctrl, const uint8_t channel)
+{
+	const uint8_t rcven_drift = ctrl->lpddr ? DIV_ROUND_UP(tDQSCK_DRIFT, ctrl->qclkps) : 1;
+	uint8_t max_rcven = 0;
+	for (uint8_t rank = 0; rank < NUM_SLOTRANKS; rank++) {
+		if (!rank_in_ch(ctrl, rank, channel))
+			continue;
+
+		for (uint8_t byte = 0; byte < ctrl->lanes; byte++)
+			max_rcven = MAX(max_rcven, ctrl->rcven[channel][rank][byte] / 64);
+	}
+	const union ddr_data_control_1_reg ddr_data_control_1 = {
+		.raw = ctrl->dq_control_1[channel][0],
+	};
+	const bool lpddr_long_odt = ddr_data_control_1.lpddr_long_odt_en;
+	const uint8_t rcven_turnoff = max_rcven + 18 + 2 * rcven_drift + lpddr_long_odt;
+	const union ddr_data_control_0_reg ddr_data_control_0 = {
+		.raw = ctrl->dq_control_0[channel],
+	};
+	for (uint8_t byte = 0; byte < ctrl->lanes; byte++) {
+		union ddr_data_control_2_reg ddr_data_control_2 = {
+			.raw = ctrl->dq_control_2[channel][byte],
+		};
+		if (ddr_data_control_0.odt_samp_extend_en) {
+			if (ddr_data_control_2.rx_clk_stg_num < rcven_turnoff)
+				ddr_data_control_2.rx_clk_stg_num = rcven_turnoff;
+		} else {
+			const int8_t o_on = ddr_data_control_1.odt_delay;
+			const int8_t o_off = ddr_data_control_1.odt_duration;
+			ddr_data_control_2.rx_clk_stg_num = MAX(17, o_on + o_off + 14);
+		}
+		ctrl->dq_control_2[channel][byte] = ddr_data_control_2.raw;
+		mchbar_write32(DQ_CONTROL_2(channel, byte), ddr_data_control_2.raw);
+	}
+}
+
+#define SELF_REFRESH_IDLE_COUNT 0x200
+
+static void enter_sr(void)
+{
+	mchbar_write32(PM_SREF_CONFIG, SELF_REFRESH_IDLE_COUNT | BIT(16));
+	udelay(1);
+}
+
+enum power_down_mode {
+	PDM_NO_PD = 0,
+	PDM_APD = 1,
+	PDM_PPD = 2,
+	PDM_PPD_DLL_OFF = 6,
+};
+
+static void power_down_config(struct sysinfo *ctrl)
+{
+	const enum power_down_mode pd_mode = ctrl->lpddr ? PDM_PPD : PDM_PPD_DLL_OFF;
+	mchbar_write32(PM_PDWN_CONFIG, pd_mode << 12 | 0x40);
+}
+
+static void train_power_modes_post(struct sysinfo *ctrl)
+{
+	for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+		if (!does_ch_exist(ctrl, channel))
+			continue;
+
+		/* Adjust tCPDED and tPRPDEN */
+		if (ctrl->mem_clock_mhz >= 933)
+			ctrl->tc_bankrank_d[channel].tCPDED = 2;
+
+		if (ctrl->mem_clock_mhz >= 1066)
+			ctrl->tc_bankrank_d[channel].tPRPDEN = 2;
+
+		mchbar_write32(TC_BANK_RANK_D_ch(channel), ctrl->tc_bankrank_d[channel].raw);
+	}
+	power_down_config(ctrl);
+	mchbar_write32(MCDECS_CBIT, BIT(30)); /* dis_msg_clk_gate */
+}
+
+static uint8_t compute_burst_end_odt_delay(const struct sysinfo *const ctrl)
+{
+	/* Must be disabled for LPDDR */
+	if (ctrl->lpddr)
+		return 0;
+
+	const uint8_t beod = MIN(7, DIV_ROUND_CLOSEST(14300 * 20 / 100, ctrl->qclkps));
+	if (beod < 3)
+		return 0;
+
+	if (beod < 4)
+		return 4;
+
+	return beod;
+}
+
+static void program_burst_end_odt_delay(struct sysinfo *ctrl)
+{
+	/* Program burst_end_odt_delay - it should be zero during training steps */
+	const uint8_t beod = compute_burst_end_odt_delay(ctrl);
+	for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+		if (!does_ch_exist(ctrl, channel))
+			continue;
+
+		for (uint8_t byte = 0; byte < ctrl->lanes; byte++) {
+			union ddr_data_control_1_reg ddr_data_control_1 = {
+				.raw = ctrl->dq_control_1[channel][byte],
+			};
+			ddr_data_control_1.burst_end_odt_delay = beod;
+			ctrl->dq_control_1[channel][byte] = ddr_data_control_1.raw;
+			mchbar_write32(DQ_CONTROL_1(channel, byte), ddr_data_control_1.raw);
+		}
+	}
+}
+
+/*
+ * Return a random value to use for scrambler seeds. Try to use RDRAND
+ * first and fall back to hardcoded values if RDRAND does not succeed.
+ */
+static uint16_t get_random_number(const uint8_t channel)
+{
+	/* The RDRAND instruction is only available 100k cycles after reset */
+	for (size_t i = 0; i < 100000; i++) {
+		uint32_t status;
+		uint32_t random;
+		/** TODO: Clean up asm **/
+		__asm__ __volatile__(
+			"\n\t .byte 0x0F, 0xC7, 0xF0"
+			"\n\t movl %%eax, %0"
+			"\n\t pushf"
+			"\n\t pop %%eax"
+			"\n\t movl %%eax, %1"
+			: "=m"(random),
+			  "=m"(status)
+			: /* No inputs */
+			: "eax", "cc");
+
+		/* Only consider non-zero random values as valid */
+		if (status & 1 && random)
+			return random;
+	}
+
+	/* https://xkcd.com/221 */
+	if (channel)
+		return 0x28f4;
+	else
+		return 0x893e;
+}
+
+/* Work around "error: 'typeof' applied to a bit-field" */
+static inline uint32_t max(const uint32_t a, const uint32_t b)
+{
+	return MAX(a, b);
+}
+
+enum raminit_status activate_mc(struct sysinfo *ctrl)
+{
+	const bool enable_scrambling = true;
+	const bool enable_cmd_tristate = true;
+	for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+		if (!does_ch_exist(ctrl, channel))
+			continue;
+
+		if (enable_scrambling && ctrl->stepping < STEPPING_C0) {
+			/* Make sure tRDRD_(sr, dr, dd) are at least 6 for scrambler W/A */
+			union tc_bank_rank_a_reg tc_bank_rank_a = {
+				.raw = mchbar_read32(TC_BANK_RANK_A_ch(channel)),
+			};
+			tc_bank_rank_a.tRDRD_sr = max(tc_bank_rank_a.tRDRD_sr, 6);
+			tc_bank_rank_a.tRDRD_dr = max(tc_bank_rank_a.tRDRD_dr, 6);
+			tc_bank_rank_a.tRDRD_dd = max(tc_bank_rank_a.tRDRD_dd, 6);
+			mchbar_write32(TC_BANK_RANK_A_ch(channel), tc_bank_rank_a.raw);
+		}
+		if (enable_scrambling) {
+			const union ddr_scramble_reg ddr_scramble = {
+				.scram_key = get_random_number(channel),
+				.scram_en = 1,
+			};
+			mchbar_write32(DDR_SCRAMBLE_ch(channel), ddr_scramble.raw);
+		}
+		if (ctrl->tCMD == 1) {
+			/* If we are in 1N mode, enable and set command rate limit to 3 */
+			union mcmain_command_rate_limit_reg cmd_rate_limit = {
+				.raw = mchbar_read32(COMMAND_RATE_LIMIT_ch(channel)),
+			};
+			cmd_rate_limit.enable_cmd_limit = 1;
+			cmd_rate_limit.cmd_rate_limit = 3;
+			mchbar_write32(COMMAND_RATE_LIMIT_ch(channel), cmd_rate_limit.raw);
+		}
+		if (enable_cmd_tristate) {
+			/* Enable command tri-state at the end of training */
+			union tc_bank_rank_a_reg tc_bank_rank_a = {
+				.raw = mchbar_read32(TC_BANK_RANK_A_ch(channel)),
+			};
+			tc_bank_rank_a.cmd_3st_dis = 0;
+			mchbar_write32(TC_BANK_RANK_A_ch(channel), tc_bank_rank_a.raw);
+		}
+		/* Set MC to normal mode and clean the ODT and CKE */
+		mchbar_write32(REUT_ch_SEQ_CFG(channel), REUT_MODE_NOP << 12);
+		/* Set again the rank occupancy */
+		mchbar_write8(MC_INIT_STATE_ch(channel), ctrl->rankmap[channel]);
+		if (ctrl->is_ecc) {
+			/* Enable ECC I/O and logic */
+			union mad_dimm_reg mad_dimm = {
+				.raw = mchbar_read32(MAD_DIMM(channel)),
+			};
+			mad_dimm.ecc_mode = 3;
+			mchbar_write32(MAD_DIMM(channel), mad_dimm.raw);
+		}
+	}
+
+	if (!is_hsw_ult())
+		update_internal_clocks_on(ctrl);
+
+	update_sdll_length(ctrl);
+
+	program_burst_end_odt_delay(ctrl);
+
+	if (is_hsw_ult()) {
+		for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+			if (!does_ch_exist(ctrl, channel))
+				continue;
+
+			set_rx_clk_stg_num(ctrl, channel);
+		}
+		/** TODO: Program DDRPL_CR_DDR_TX_DELAY if Memory Trace is enabled **/
+	}
+
+	/* Enable periodic COMP */
+	mchbar_write32(M_COMP, (union pcu_comp_reg) {
+		.comp_interval = COMP_INT,
+	}.raw);
+
+	/* Enable the power mode before PCU starts working */
+	train_power_modes_post(ctrl);
+
+	/* Set idle timer and self refresh enable bits */
+	enter_sr();
+
+	/** FIXME: Do not hardcode power weights and RAPL settings **/
+	mchbar_write32(0x5888, 0x00000d0d);
+	mchbar_write32(0x5884, 0x00000004); /* 58.2 pJ */
+
+	mchbar_write32(0x58e0, 0);
+	mchbar_write32(0x58e4, 0);
+
+	mchbar_write32(0x5890, 0xffff);
+	mchbar_write32(0x5894, 0xffff);
+	mchbar_write32(0x5898, 0xffff);
+	mchbar_write32(0x589c, 0xffff);
+	mchbar_write32(0x58d0, 0xffff);
+	mchbar_write32(0x58d4, 0xffff);
+	mchbar_write32(0x58d8, 0xffff);
+	mchbar_write32(0x58dc, 0xffff);
+
+	/* Overwrite thermal parameters */
+	for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+		mchbar_write32(_MCMAIN_C(0x42ec, channel), 0x0000000f);
+		mchbar_write32(_MCMAIN_C(0x42f0, channel), 0x00000009);
+		mchbar_write32(_MCMAIN_C(0x42f4, channel), 0x00000093);
+		mchbar_write32(_MCMAIN_C(0x42f8, channel), 0x00000087);
+		mchbar_write32(_MCMAIN_C(0x42fc, channel), 0x000000de);
+
+		/** TODO: Differs for LPDDR **/
+		mchbar_write32(PM_THRT_CKE_MIN_ch(channel), 0x30);
+	}
+	mchbar_write32(PCU_DDR_PTM_CTL, 0x40);
+	return RAMINIT_STATUS_SUCCESS;
+}
+
+static void mc_lockdown(void)
+{
+	/* Lock memory controller registers */
+	mchbar_write32(MC_LOCK, 0x8f);
+
+	/* MPCOHTRK_GDXC_OCLA_ADDRESS_HI_LOCK is set when programming the memory map */
+
+	/* Lock memory map registers */
+	pci_or_config16(HOST_BRIDGE, GGC, 1 << 0);
+	pci_or_config32(HOST_BRIDGE, DPR, 1 << 0);
+	pci_or_config32(HOST_BRIDGE, MESEG_LIMIT, 1 << 10);
+	pci_or_config32(HOST_BRIDGE, REMAPBASE, 1 << 0);
+	pci_or_config32(HOST_BRIDGE, REMAPLIMIT, 1 << 0);
+	pci_or_config32(HOST_BRIDGE, TOM, 1 << 0);
+	pci_or_config32(HOST_BRIDGE, TOUUD, 1 << 0);
+	pci_or_config32(HOST_BRIDGE, BDSM, 1 << 0);
+	pci_or_config32(HOST_BRIDGE, BGSM, 1 << 0);
+	pci_or_config32(HOST_BRIDGE, TOLUD, 1 << 0);
+}
+
+enum raminit_status raminit_done(struct sysinfo *ctrl)
+{
+	union mc_init_state_g_reg mc_init_state_g = {
+		.raw = mchbar_read32(MC_INIT_STATE_G),
+	};
+	mc_init_state_g.refresh_enable = 1;
+	mc_init_state_g.pu_mrc_done = 1;
+	mc_init_state_g.mrc_done = 1;
+	mchbar_write32(MC_INIT_STATE_G, mc_init_state_g.raw);
+
+	/* Lock the memory controller to enable normal operation */
+	mc_lockdown();
+
+	/* Poll for mc_init_done_ack to make sure memory initialization is complete */
+	printk(BIOS_DEBUG, "Waiting for mc_init_done acknowledgement... ");
+
+	struct stopwatch timer;
+	stopwatch_init_msecs_expire(&timer, 2000);
+	do {
+		mc_init_state_g.raw = mchbar_read32(MC_INIT_STATE_G);
+
+		/* DRAM will NOT work without the acknowledgement. There is no hope. */
+		if (stopwatch_expired(&timer))
+			die("\nTimed out waiting for mc_init_done acknowledgement\n");
+
+	} while (mc_init_state_g.mc_init_done_ack == 0);
+	printk(BIOS_DEBUG, "DONE!\n");
+
+	/* Provide some data for the graphics driver. Yes, it's hardcoded. */
+	mchbar_write32(SSKPD + 0, 0x05a2404f);
+	mchbar_write32(SSKPD + 4, 0x140000a0);
+	return RAMINIT_STATUS_SUCCESS;
+}
diff --git a/src/northbridge/intel/haswell/native_raminit/raminit_main.c b/src/northbridge/intel/haswell/native_raminit/raminit_main.c
index 1ff23be615..3a65fb01fb 100644
--- a/src/northbridge/intel/haswell/native_raminit/raminit_main.c
+++ b/src/northbridge/intel/haswell/native_raminit/raminit_main.c
@@ -63,6 +63,8 @@ static const struct task_entry cold_boot[] = {
 	{ train_receive_enable, true, "RCVET", },
 	{ train_read_mpr, true, "RDMPRT", },
 	{ train_jedec_write_leveling, true, "JWRL", },
+	{ activate_mc, true, "ACTIVATE", },
+	{ raminit_done, true, "RAMINITEND", },
 };
 
 /* Return a generic stepping value to make stepping checks simpler */
@@ -143,7 +145,4 @@ void raminit_main(const enum raminit_boot_mode bootmode)
 
 	if (status != RAMINIT_STATUS_SUCCESS)
 		die("Memory initialization was met with utmost failure and misery\n");
-
-	/** TODO: Implement the required magic **/
-	die("NATIVE RAMINIT: More Magic (tm) required.\n");
 }
diff --git a/src/northbridge/intel/haswell/native_raminit/raminit_native.c b/src/northbridge/intel/haswell/native_raminit/raminit_native.c
index bd9bc8e692..1ea729b23d 100644
--- a/src/northbridge/intel/haswell/native_raminit/raminit_native.c
+++ b/src/northbridge/intel/haswell/native_raminit/raminit_native.c
@@ -200,8 +200,6 @@ void perform_raminit(const int s3resume)
 		else
 			me_status = ME_INIT_STATUS_SUCCESS;
 
-		/** TODO: Remove this once raminit is implemented **/
-		me_status = ME_INIT_STATUS_ERROR;
 		intel_early_me_init_done(me_status);
 	}
 
@@ -217,7 +215,8 @@
 	}
 
 	/* Save training data on non-S3 resumes */
-	if (!s3resume)
+	/** TODO: Enable this once training data is populated **/
+	if (0 && !s3resume)
 		save_mrc_data(&md);
 
 	/** TODO: setup_sdram_meminfo **/
diff --git a/src/northbridge/intel/haswell/native_raminit/raminit_native.h b/src/northbridge/intel/haswell/native_raminit/raminit_native.h
index 666b233c45..98e39cb76e 100644
--- a/src/northbridge/intel/haswell/native_raminit/raminit_native.h
+++ b/src/northbridge/intel/haswell/native_raminit/raminit_native.h
@@ -449,6 +449,8 @@ enum raminit_status do_jedec_init(struct sysinfo *ctrl);
 enum raminit_status train_receive_enable(struct sysinfo *ctrl);
 enum raminit_status train_read_mpr(struct sysinfo *ctrl);
 enum raminit_status train_jedec_write_leveling(struct sysinfo *ctrl);
+enum raminit_status activate_mc(struct sysinfo *ctrl);
+enum raminit_status raminit_done(struct sysinfo *ctrl);
 
 void configure_timings(struct sysinfo *ctrl);
 void configure_refresh(struct sysinfo *ctrl);
diff --git a/src/northbridge/intel/haswell/native_raminit/reg_structs.h b/src/northbridge/intel/haswell/native_raminit/reg_structs.h
index a0e36ed082..0d9aaa1f7c 100644
--- a/src/northbridge/intel/haswell/native_raminit/reg_structs.h
+++ b/src/northbridge/intel/haswell/native_raminit/reg_structs.h
@@ -294,6 +294,18 @@ union ddr_cke_ctl_controls_reg {
 	uint32_t raw;
 };
 
+union ddr_scramble_reg {
+	struct __packed {
+		uint32_t scram_en : 1; // Bits 0:0
+		uint32_t scram_key : 16; // Bits 16:1
+		uint32_t clk_gate_ab : 2; // Bits 18:17
+		uint32_t clk_gate_c : 2; // Bits 20:19
+		uint32_t en_dbi_ab : 1; // Bits 21:21
+		uint32_t : 10; // Bits 31:22
+	};
+	uint32_t raw;
+};
+
 union ddr_scram_misc_control_reg {
 	struct __packed {
 		uint32_t wl_wake_cycles : 2; // Bits 1:0
diff --git a/src/northbridge/intel/haswell/registers/mchbar.h b/src/northbridge/intel/haswell/registers/mchbar.h
index 7c0b5a49de..49a215aa71 100644
--- a/src/northbridge/intel/haswell/registers/mchbar.h
+++ b/src/northbridge/intel/haswell/registers/mchbar.h
@@ -20,6 +20,7 @@
 #define DDR_DATA_TRAIN_FEEDBACK(ch, byte) _DDRIO_C_R_B(0x0054, ch, 0, byte)
+#define DQ_CONTROL_1(ch, byte) _DDRIO_C_R_B(0x0060, ch, 0, byte)
 #define DQ_CONTROL_2(ch, byte) _DDRIO_C_R_B(0x0064, ch, 0, byte)
 #define DDR_DATA_OFFSET_TRAIN_ch_b(ch, byte) _DDRIO_C_R_B(0x0070, ch, 0, byte)
 #define DQ_CONTROL_0(ch, byte) _DDRIO_C_R_B(0x0074, ch, 0, byte)
@@ -147,6 +148,8 @@
 #define QCLK_ch_LDAT_SDAT(ch) _MCMAIN_C(0x42d4, ch)
 #define QCLK_ch_LDAT_DATA_IN_x(ch, x) _MCMAIN_C_X(0x42dc, ch, x) /* x in 0 .. 1 */
 
+#define PM_THRT_CKE_MIN_ch(ch) _MCMAIN_C(0x4328, ch)
+
 #define REUT_GLOBAL_CTL 0x4800
 #define REUT_GLOBAL_ERR 0x4804
@@ -175,6 +178,8 @@
 #define MCSCHEDS_DFT_MISC 0x4c30
 
+#define PM_PDWN_CONFIG 0x4cb0
+
 #define REUT_ERR_DATA_STATUS 0x4ce0
 
 #define REUT_MISC_CKE_CTRL 0x4d90
@@ -186,8 +191,10 @@
 #define MAD_CHNL 0x5000 /* Address Decoder Channel Configuration */
 #define MAD_DIMM(ch) (0x5004 + (ch) * 4)
 #define MAD_ZR 0x5014
+#define MCDECS_CBIT 0x501c
 #define MC_INIT_STATE_G 0x5030
 #define MRC_REVISION 0x5034 /* MRC Revision */
+#define PM_SREF_CONFIG 0x5060
 #define RCOMP_TIMER 0x5084
-- 
2.39.2