0045-haswell-NRI-Configure-initial-MC-settings.patch

From 0001039f5ea6be6700a453f511069be2ce1b4e7e Mon Sep 17 00:00:00 2001
From: Angel Pons <th3fanbus@gmail.com>
Date: Sat, 7 May 2022 17:22:07 +0200
Subject: [PATCH 03/17] haswell NRI: Configure initial MC settings

Program initial memory controller settings. Many of these values will be
adjusted later during training.

Change-Id: If33846b51cb1bab5d0458fe626e13afb1bdc900e
Signed-off-by: Angel Pons <th3fanbus@gmail.com>
---
 .../intel/haswell/native_raminit/Makefile.mk | 2 +
 .../haswell/native_raminit/configure_mc.c | 822 ++++++++++++++++++
 .../haswell/native_raminit/raminit_main.c | 2 +
 .../haswell/native_raminit/raminit_native.h | 101 +++
 .../haswell/native_raminit/reg_structs.h | 405 +++++++++
 .../haswell/native_raminit/timings_refresh.c | 13 +
 .../intel/haswell/registers/mchbar.h | 94 ++
 7 files changed, 1439 insertions(+)
 create mode 100644 src/northbridge/intel/haswell/native_raminit/configure_mc.c
 create mode 100644 src/northbridge/intel/haswell/native_raminit/reg_structs.h
 create mode 100644 src/northbridge/intel/haswell/native_raminit/timings_refresh.c

diff --git a/src/northbridge/intel/haswell/native_raminit/Makefile.mk b/src/northbridge/intel/haswell/native_raminit/Makefile.mk
index 2769e0bbb4..fc55277a65 100644
--- a/src/northbridge/intel/haswell/native_raminit/Makefile.mk
+++ b/src/northbridge/intel/haswell/native_raminit/Makefile.mk
@@ -1,8 +1,10 @@
## SPDX-License-Identifier: GPL-2.0-or-later
+romstage-y += configure_mc.c
romstage-y += lookup_timings.c
romstage-y += init_mpll.c
romstage-y += io_comp_control.c
romstage-y += raminit_main.c
romstage-y += raminit_native.c
romstage-y += spd_bitmunching.c
+romstage-y += timings_refresh.c
diff --git a/src/northbridge/intel/haswell/native_raminit/configure_mc.c b/src/northbridge/intel/haswell/native_raminit/configure_mc.c
new file mode 100644
index 0000000000..88249725a7
--- /dev/null
+++ b/src/northbridge/intel/haswell/native_raminit/configure_mc.c
@@ -0,0 +1,822 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <assert.h>
+#include <commonlib/bsd/clamp.h>
+#include <console/console.h>
+#include <delay.h>
+#include <lib.h>
+#include <northbridge/intel/haswell/haswell.h>
+#include <string.h>
+#include <types.h>
+
+#include "raminit_native.h"
+
+static void program_misc_control(struct sysinfo *ctrl)
+{
+ if (!is_hsw_ult())
+ return;
+
+ const union ddr_scram_misc_control_reg ddr_scram_misc_ctrl = {
+ .ddr_no_ch_interleave = !ctrl->dq_pins_interleaved,
+ .lpddr_mode = ctrl->lpddr,
+ .cke_mapping_ch0 = ctrl->lpddr ? ctrl->lpddr_cke_rank_map[0] : 0,
+ .cke_mapping_ch1 = ctrl->lpddr ? ctrl->lpddr_cke_rank_map[1] : 0,
+ };
+ mchbar_write32(DDR_SCRAM_MISC_CONTROL, ddr_scram_misc_ctrl.raw);
+}
+
+static void program_mrc_revision(void)
+{
+ mchbar_write32(MRC_REVISION, 0x01090000); /* MRC 1.9.0 Build 0 */
+}
+
+static void program_ranks_used(struct sysinfo *ctrl)
+{
+ for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+ mchbar_write8(MC_INIT_STATE_ch(channel), ctrl->rankmap[channel]);
+ if (!does_ch_exist(ctrl, channel)) {
+ mchbar_write32(DDR_CLK_ch_RANKS_USED(channel), 0);
+ mchbar_write32(DDR_CTL_ch_CTL_RANKS_USED(channel), 0);
+ mchbar_write32(DDR_CKE_ch_CTL_RANKS_USED(channel), 0);
+ continue;
+ }
+ uint32_t clk_ranks_used = ctrl->rankmap[channel];
+ if (ctrl->lpddr) {
+ /* With LPDDR, the clock usage goes by group instead */
+ clk_ranks_used = 0;
+ for (uint8_t group = 0; group < NUM_GROUPS; group++) {
+ if (ctrl->dq_byte_map[channel][CT_ITERATION_CLOCK][group])
+ clk_ranks_used |= BIT(group);
+ }
+ }
+ mchbar_write32(DDR_CLK_ch_RANKS_USED(channel), clk_ranks_used);
+
+ uint32_t ctl_ranks_used = ctrl->rankmap[channel];
+ if (is_hsw_ult()) {
+ /* Set ODT disable bits */
+ /** TODO: May need to do this after JEDEC reset/init **/
+ if (ctrl->lpddr && ctrl->lpddr_dram_odt)
+ ctl_ranks_used |= 2 << 4; /* ODT is used on rank 0 */
+ else
+ ctl_ranks_used |= 3 << 4;
+ }
+ mchbar_write32(DDR_CTL_ch_CTL_RANKS_USED(channel), ctl_ranks_used);
+
+ uint32_t cke_ranks_used = ctrl->rankmap[channel];
+ if (ctrl->lpddr) {
+ /* Use CKE-to-rank mapping for LPDDR */
+ const uint8_t cke_rank_map = ctrl->lpddr_cke_rank_map[channel];
+ cke_ranks_used = 0;
+ for (uint8_t rank = 0; rank < NUM_SLOTRANKS; rank++) {
+ /* ULT only has 2 ranks per channel */
+ if (rank >= 2)
+ break;
+
+ if (!rank_in_ch(ctrl, rank, channel))
+ continue;
+
+ for (uint8_t cke = 0; cke < 4; cke++) {
+ if (rank == ((cke_rank_map >> cke) & 1))
+ cke_ranks_used |= BIT(cke);
+ }
+ }
+ }
+ mchbar_write32(DDR_CKE_ch_CTL_RANKS_USED(channel), cke_ranks_used);
+ }
+}
+
+static const uint8_t rxb_trad[2][5][4] = {
+ { /* Vdd low */
+ /* 1067 MT/s, 1333 MT/s, 1600 MT/s, 1867 MT/s, 2133 MT/s, */
+ {4, 3, 3, 2}, {4, 4, 3, 2}, {5, 4, 3, 3}, {5, 4, 4, 3}, {5, 4, 4, 3},
+ },
+ { /* Vdd hi */
+ /* 1067 MT/s, 1333 MT/s, 1600 MT/s, 1867 MT/s, 2133 MT/s, */
+ {4, 3, 3, 2}, {4, 4, 3, 2}, {5, 4, 3, 3}, {5, 4, 4, 3}, {4, 4, 3, 3},
+ },
+};
+
+static const uint8_t rxb_ultx[2][3][4] = {
+ { /* Vdd low */
+ /* 1067 MT/s, 1333 MT/s, 1600 MT/s, */
+ {5, 6, 6, 5}, {5, 6, 6, 5}, {4, 6, 6, 6},
+ },
+ { /* Vdd hi */
+ /* 1067 MT/s, 1333 MT/s, 1600 MT/s, */
+ {7, 6, 6, 5}, {7, 6, 6, 5}, {7, 6, 6, 6},
+ },
+};
+
+uint8_t get_rx_bias(const struct sysinfo *ctrl)
+{
+ const bool is_ult = is_hsw_ult();
+ const bool vddhi = ctrl->vdd_mv > 1350;
+ const uint8_t max_rxf = is_ult ? ARRAY_SIZE(rxb_ultx[0]) : ARRAY_SIZE(rxb_trad[0]);
+ const uint8_t ref_clk = ctrl->base_freq == 133 ? 4 : 6;
+ const uint8_t rx_f = clamp_s8(0, ctrl->multiplier - ref_clk, max_rxf - 1);
+ const uint8_t rx_cb = mchbar_read32(DDR_CLK_CB_STATUS) & 0x3;
+ if (is_ult)
+ return rxb_ultx[vddhi][rx_f][rx_cb];
+ else
+ return rxb_trad[vddhi][rx_f][rx_cb];
+}
+
+static void program_ddr_data(struct sysinfo *ctrl, const bool dis_odt_static, const bool vddhi)
+{
+ const bool is_ult = is_hsw_ult();
+
+ for (uint8_t rank = 0; rank < NUM_SLOTRANKS; rank++) {
+ if (!does_rank_exist(ctrl, rank))
+ continue;
+
+ const union ddr_data_rx_train_rank_reg rx_train = {
+ .rcven = 64,
+ .dqs_p = 32,
+ .dqs_n = 32,
+ };
+ mchbar_write32(DDR_DATA_RX_TRAIN_RANK(rank), rx_train.raw);
+ mchbar_write32(DDR_DATA_RX_PER_BIT_RANK(rank), 0x88888888);
+
+ const union ddr_data_tx_train_rank_reg tx_train = {
+ .tx_eq = TXEQFULLDRV | 11,
+ .dq_delay = 96,
+ .dqs_delay = 64,
+ };
+ mchbar_write32(DDR_DATA_TX_TRAIN_RANK(rank), tx_train.raw);
+ mchbar_write32(DDR_DATA_TX_PER_BIT_RANK(rank), 0x88888888);
+
+ for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+ for (uint8_t byte = 0; byte < ctrl->lanes; byte++) {
+ ctrl->tx_dq[channel][rank][byte] = tx_train.dq_delay;
+ ctrl->txdqs[channel][rank][byte] = tx_train.dqs_delay;
+ ctrl->tx_eq[channel][rank][byte] = tx_train.tx_eq;
+
+ ctrl->rcven[channel][rank][byte] = rx_train.rcven;
+ ctrl->rxdqsp[channel][rank][byte] = rx_train.dqs_p;
+ ctrl->rxdqsn[channel][rank][byte] = rx_train.dqs_n;
+ ctrl->rx_eq[channel][rank][byte] = rx_train.rx_eq;
+ }
+ }
+ }
+ mchbar_write32(DDR_DATA_TX_XTALK, 0);
+ mchbar_write32(DDR_DATA_RX_OFFSET_VDQ, 0x88888888);
+ mchbar_write32(DDR_DATA_OFFSET_TRAIN, 0);
+ mchbar_write32(DDR_DATA_OFFSET_COMP, 0);
+
+ const union ddr_data_control_0_reg data_control_0 = {
+ .internal_clocks_on = !is_ult,
+ .data_vccddq_hi = vddhi,
+ .disable_odt_static = dis_odt_static,
+ .lpddr_mode = ctrl->lpddr,
+ .odt_samp_extend_en = ctrl->lpddr,
+ .early_rleak_en = ctrl->lpddr && ctrl->stepping >= STEPPING_C0,
+ };
+ mchbar_write32(DDR_DATA_CONTROL_0, data_control_0.raw);
+
+ const union ddr_data_control_1_reg data_control_1 = {
+ .dll_mask = 1,
+ .rx_bias_ctl = get_rx_bias(ctrl),
+ .odt_delay = -2,
+ .odt_duration = 7,
+ .sense_amp_delay = -2,
+ .sense_amp_duration = 7,
+ };
+ mchbar_write32(DDR_DATA_CONTROL_1, data_control_1.raw);
+
+ clear_data_offset_train_all(ctrl);
+
+ /* Stagger byte turn-on to reduce dI/dT */
+ const uint8_t byte_stagger[] = { 0, 4, 1, 5, 2, 6, 3, 7, 8 };
+ const uint8_t latency = 2 * ctrl->tAA - 6;
+ for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+ if (!does_ch_exist(ctrl, channel))
+ continue;
+
+ union ddr_data_control_2_reg data_control_2 = {
+ .raw = 0,
+ };
+ if (is_ult) {
+ data_control_2.rx_dqs_amp_offset = 8;
+ data_control_2.rx_clk_stg_num = 0x1f;
+ data_control_2.leaker_comp = ctrl->lpddr ? 3 : 0;
+ }
+ for (uint8_t byte = 0; byte < ctrl->lanes; byte++) {
+ const uint8_t stg = latency * byte_stagger[byte] / ctrl->lanes;
+ data_control_2.rx_stagger_ctl = stg & 0x1f;
+ mchbar_write32(DQ_CONTROL_2(channel, byte), data_control_2.raw);
+ ctrl->data_offset_comp[channel][byte] = 0;
+ ctrl->dq_control_1[channel][byte] = data_control_1.raw;
+ ctrl->dq_control_2[channel][byte] = data_control_2.raw;
+ }
+ ctrl->dq_control_0[channel] = data_control_0.raw;
+ }
+}
+
+static void program_vsshi_control(struct sysinfo *ctrl, const uint16_t vsshi_mv)
+{
+ const uint32_t vsshi_control_reg = is_hsw_ult() ? 0x366c : 0x306c;
+ const union ddr_comp_vsshi_control_reg ddr_vsshi_control = {
+ .vsshi_target = (vsshi_mv * 192) / ctrl->vdd_mv - 20,
+ .hi_bw_divider = 1,
+ .lo_bw_divider = 1,
+ .bw_error = 2,
+ .panic_driver_en = 1,
+ .panic_voltage = 24 / 8, /* Voltage in 8mV steps */
+ .gain_boost = 1,
+ };
+ mchbar_write32(vsshi_control_reg, ddr_vsshi_control.raw);
+ mchbar_write32(DDR_COMP_VSSHI_CONTROL, ddr_vsshi_control.raw);
+}
+
+static void calc_vt_slope_code(const uint16_t slope, uint8_t *best_a, uint8_t *best_b)
+{
+ const int16_t coding[] = {0, -125, -62, -31, 250, 125, 62, 31};
+ *best_a = 0;
+ *best_b = 0;
+ int16_t best_err = slope;
+ for (uint8_t b = 0; b < ARRAY_SIZE(coding); b++) {
+ for (uint8_t a = b; a < ARRAY_SIZE(coding); a++) {
+ int16_t error = slope - (coding[a] + coding[b]);
+ if (error < 0)
+ error = -error;
+
+ if (error < best_err) {
+ best_err = error;
+ *best_a = a;
+ *best_b = b;
+ }
+ }
+ }
+}
+
+static void program_dimm_vref(struct sysinfo *ctrl, const uint16_t vccio_mv, const bool vddhi)
+{
+ const bool is_ult = is_hsw_ult();
+
+ /* Static values for ULT */
+ uint8_t vt_slope_a = 4;
+ uint8_t vt_slope_b = 0;
+ if (!is_ult) {
+ /* On non-ULT, compute best slope code */
+ const uint16_t vt_slope = 1500 * vccio_mv / ctrl->vdd_mv - 1000;
+ calc_vt_slope_code(vt_slope, &vt_slope_a, &vt_slope_b);
+ }
+ const union ddr_data_vref_control_reg ddr_vref_control = {
+ .hi_bw_divider = is_ult ? 0 : 3,
+ .lo_bw_divider = 3,
+ .sample_divider = is_ult ? 1 : 3,
+ .slow_bw_error = 1,
+ .hi_bw_enable = 1,
+ .vt_slope_b = vt_slope_b,
+ .vt_slope_a = vt_slope_a,
+ .vt_offset = 0,
+ };
+ mchbar_write32(is_ult ? 0xf68 : 0xf6c, ddr_vref_control.raw); /* Use CH1 byte 7 */
+
+ const union ddr_data_vref_adjust_reg ddr_vref_adjust = {
+ .en_dimm_vref_ca = 1,
+ .en_dimm_vref_ch0 = 1,
+ .en_dimm_vref_ch1 = 1,
+ .vccddq_hi_qnnn_h = vddhi,
+ .hi_z_timer_ctrl = 3,
+ };
+ ctrl->dimm_vref = ddr_vref_adjust;
+ mchbar_write32(DDR_DATA_VREF_ADJUST, ddr_vref_adjust.raw);
+}
+
+static uint32_t pi_code(const uint32_t code)
+{
+ return code << 21 | code << 14 | code << 7 | code << 0;
+}
+
+static void program_ddr_ca(struct sysinfo *ctrl, const bool vddhi)
+{
+ for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+ if (!does_ch_exist(ctrl, channel))
+ continue;
+
+ const union ddr_clk_controls_reg ddr_clk_controls = {
+ .dll_mask = 1,
+ .vccddq_hi = vddhi,
+ .lpddr_mode = ctrl->lpddr,
+ };
+ mchbar_write32(DDR_CLK_ch_CONTROLS(channel), ddr_clk_controls.raw);
+
+ const union ddr_cmd_controls_reg ddr_cmd_controls = {
+ .dll_mask = 1,
+ .vccddq_hi = vddhi,
+ .lpddr_mode = ctrl->lpddr,
+ .early_weak_drive = 3,
+ .cmd_tx_eq = 1,
+ };
+ mchbar_write32(DDR_CMD_ch_CONTROLS(channel), ddr_cmd_controls.raw);
+
+ const union ddr_cke_ctl_controls_reg ddr_cke_controls = {
+ .dll_mask = 1,
+ .vccddq_hi = vddhi,
+ .lpddr_mode = ctrl->lpddr,
+ .early_weak_drive = 3,
+ .cmd_tx_eq = 1,
+ .ctl_tx_eq = 1,
+ .ctl_sr_drv = 2,
+ };
+ mchbar_write32(DDR_CKE_ch_CTL_CONTROLS(channel), ddr_cke_controls.raw);
+
+ const union ddr_cke_ctl_controls_reg ddr_ctl_controls = {
+ .dll_mask = 1,
+ .vccddq_hi = vddhi,
+ .lpddr_mode = ctrl->lpddr,
+ .ctl_tx_eq = 1,
+ .ctl_sr_drv = 2,
+ .la_drv_en_ovrd = 1, /* Must be set on ULT */
+ };
+ mchbar_write32(DDR_CTL_ch_CTL_CONTROLS(channel), ddr_ctl_controls.raw);
+
+ const uint8_t cmd_pi = ctrl->lpddr ? 96 : 64;
+ mchbar_write32(DDR_CMD_ch_PI_CODING(channel), pi_code(cmd_pi));
+ mchbar_write32(DDR_CKE_ch_CMD_PI_CODING(channel), pi_code(cmd_pi));
+ mchbar_write32(DDR_CKE_CTL_ch_CTL_PI_CODING(channel), pi_code(64));
+ mchbar_write32(DDR_CLK_ch_PI_CODING(channel), pi_code(64));
+
+ mchbar_write32(DDR_CMD_ch_COMP_OFFSET(channel), 0);
+ mchbar_write32(DDR_CLK_ch_COMP_OFFSET(channel), 0);
+ mchbar_write32(DDR_CKE_CTL_ch_CTL_COMP_OFFSET(channel), 0);
+
+ for (uint8_t group = 0; group < NUM_GROUPS; group++) {
+ ctrl->cke_cmd_pi_code[channel][group] = cmd_pi;
+ ctrl->cmd_north_pi_code[channel][group] = cmd_pi;
+ ctrl->cmd_south_pi_code[channel][group] = cmd_pi;
+ }
+ for (uint8_t rank = 0; rank < NUM_SLOTRANKS; rank++) {
+ ctrl->clk_pi_code[channel][rank] = 64;
+ ctrl->ctl_pi_code[channel][rank] = 64;
+ }
+ }
+}
+
+enum {
+ RCOMP_RD_ODT = 0,
+ RCOMP_WR_DS_DQ,
+ RCOMP_WR_DS_CMD,
+ RCOMP_WR_DS_CTL,
+ RCOMP_WR_DS_CLK,
+ RCOMP_MAX_CODES,
+};
+
+struct rcomp_info {
+ uint8_t resistor;
+ uint8_t sz_steps;
+ uint8_t target_r;
+ int8_t result;
+};
+
+static void program_rcomp_vref(struct sysinfo *ctrl, const bool dis_odt_static)
+{
+ const bool is_ult = is_hsw_ult();
+ /*
+ * +-------------------------------+
+ * | Rcomp resistor values in ohms |
+ * +-----------+------+------+-----+
+ * | Ball name | Trad | ULTX | Use |
+ * +-----------+------+------+-----+
+ * | SM_RCOMP0 | 100 | 200 | CMD |
+ * | SM_RCOMP1 | 75 | 120 | DQ |
+ * | SM_RCOMP2 | 100 | 100 | ODT |
+ * +-----------+------+------+-----+
+ */
+ struct rcomp_info rcomp_cfg[RCOMP_MAX_CODES] = {
+ [RCOMP_RD_ODT] = {
+ .resistor = 50,
+ .sz_steps = 96,
+ .target_r = 50,
+ },
+ [RCOMP_WR_DS_DQ] = {
+ .resistor = 25,
+ .sz_steps = 64,
+ .target_r = 33,
+ },
+ [RCOMP_WR_DS_CMD] = {
+ .resistor = 20,
+ .sz_steps = 64,
+ .target_r = 20,
+ },
+ [RCOMP_WR_DS_CTL] = {
+ .resistor = 20,
+ .sz_steps = 64,
+ .target_r = 20,
+ },
+ [RCOMP_WR_DS_CLK] = {
+ .resistor = 25,
+ .sz_steps = 64,
+ .target_r = 29,
+ },
+ };
+ if (is_ult) {
+ rcomp_cfg[RCOMP_WR_DS_DQ].resistor = 40;
+ rcomp_cfg[RCOMP_WR_DS_DQ].target_r = 40;
+ rcomp_cfg[RCOMP_WR_DS_CLK].resistor = 40;
+ } else if (ctrl->dpc[0] == 2 || ctrl->dpc[1] == 2) {
+ rcomp_cfg[RCOMP_RD_ODT].target_r = 60;
+ }
+ for (uint8_t i = 0; i < RCOMP_MAX_CODES; i++) {
+ struct rcomp_info *const r = &rcomp_cfg[i];
+ const int32_t div = 2 * (r->resistor + r->target_r);
+ assert(div);
+ const int32_t vref = (r->sz_steps * (r->resistor - r->target_r)) / div;
+
+ /* DqOdt is 5 bits wide, the other Rcomp targets are 4 bits wide */
+ const int8_t comp_limit = i == RCOMP_RD_ODT ? 16 : 8;
+ r->result = clamp_s32(-comp_limit, vref, comp_limit - 1);
+ }
+ const union ddr_comp_ctl_0_reg ddr_comp_ctl_0 = {
+ .disable_odt_static = dis_odt_static,
+ .dq_drv_vref = rcomp_cfg[RCOMP_WR_DS_DQ].result,
+ .dq_odt_vref = rcomp_cfg[RCOMP_RD_ODT].result,
+ .cmd_drv_vref = rcomp_cfg[RCOMP_WR_DS_CMD].result,
+ .ctl_drv_vref = rcomp_cfg[RCOMP_WR_DS_CTL].result,
+ .clk_drv_vref = rcomp_cfg[RCOMP_WR_DS_CLK].result,
+ };
+ ctrl->comp_ctl_0 = ddr_comp_ctl_0;
+ mchbar_write32(DDR_COMP_CTL_0, ctrl->comp_ctl_0.raw);
+}
+
+enum {
+ SCOMP_DQ = 0,
+ SCOMP_CMD,
+ SCOMP_CTL,
+ SCOMP_CLK,
+ SCOMP_MAX_CODES,
+};
+
+static void program_slew_rates(struct sysinfo *ctrl, const bool vddhi)
+{
+ const uint8_t min_cycle_delay[SCOMP_MAX_CODES] = { 46, 70, 70, 46 };
+ uint8_t buffer_stage_delay_ps[SCOMP_MAX_CODES] = { 59, 53, 53, 53 };
+ uint16_t comp_slew_rate_codes[SCOMP_MAX_CODES];
+
+ /* CMD Slew Rate = 1.8 for 2N */
+ if (ctrl->tCMD == 2)
+ buffer_stage_delay_ps[SCOMP_CMD] = 89;
+
+ /* CMD Slew Rate = 4 V/ns for double-pumped CMD bus */
+ if (ctrl->lpddr)
+ buffer_stage_delay_ps[SCOMP_CMD] = 63;
+
+ for (uint8_t i = 0; i < SCOMP_MAX_CODES; i++) {
+ uint16_t stages = DIV_ROUND_CLOSEST(ctrl->qclkps, buffer_stage_delay_ps[i]);
+ if (stages < 5)
+ stages = 5;
+
+ bool dll_pc = buffer_stage_delay_ps[i] < min_cycle_delay[i] || stages > 16;
+
+ /* Lock DLL... */
+ if (dll_pc)
+ comp_slew_rate_codes[i] = stages / 2 - 1; /* to a phase */
+ else
+ comp_slew_rate_codes[i] = (stages - 1) | BIT(4); /* to a cycle */
+ }
+ union ddr_comp_ctl_1_reg ddr_comp_ctl_1 = {
+ .dq_scomp = comp_slew_rate_codes[SCOMP_DQ],
+ .cmd_scomp = comp_slew_rate_codes[SCOMP_CMD],
+ .ctl_scomp = comp_slew_rate_codes[SCOMP_CTL],
+ .clk_scomp = comp_slew_rate_codes[SCOMP_CLK],
+ .vccddq_hi = vddhi,
+ };
+ ctrl->comp_ctl_1 = ddr_comp_ctl_1;
+ mchbar_write32(DDR_COMP_CTL_1, ctrl->comp_ctl_1.raw);
+}
+
+static uint32_t ln_x100(const uint32_t input_x100)
+{
+ uint32_t val = input_x100;
+ uint32_t ret = 0;
+ while (val > 271) {
+ val = (val * 1000) / 2718;
+ ret += 100;
+ }
+ return ret + (-16 * val * val + 11578 * val - 978860) / 10000;
+}
+
+static uint32_t compute_vsshi_vref(struct sysinfo *ctrl, const uint32_t vsshi_tgt, bool up)
+{
+ const uint32_t delta = 15;
+ const uint32_t c_die_vsshi = 2000;
+ const uint32_t r_cmd_ref = 100 * 10;
+ const uint32_t offset = up ? 64 : 0;
+ const uint32_t ln_vsshi = ln_x100((100 * vsshi_tgt) / (vsshi_tgt - delta));
+ const uint32_t r_target = (ctrl->qclkps * 2000) / (c_die_vsshi * ln_vsshi);
+ const uint32_t r_dividend = 128 * (up ? r_cmd_ref : r_target);
+ return r_dividend / (r_cmd_ref + r_target) - offset;
+}
+
+static void program_vsshi(struct sysinfo *ctrl, const uint16_t vccio_mv, const uint16_t vsshi)
+{
+ const uint16_t vsshi_down = vsshi + 24; /* Panic threshold of 24 mV */
+ const uint16_t vsshi_up = vccio_mv - vsshi_down;
+ const union ddr_comp_vsshi_reg ddr_comp_vsshi = {
+ .panic_drv_down_vref = compute_vsshi_vref(ctrl, vsshi_down, false),
+ .panic_drv_up_vref = compute_vsshi_vref(ctrl, vsshi_up, true),
+ .vt_offset = 128 * 450 / vccio_mv / 2,
+ .vt_slope_a = 4,
+ };
+ mchbar_write32(DDR_COMP_VSSHI, ddr_comp_vsshi.raw);
+}
+
+static void program_misc(struct sysinfo *ctrl)
+{
+ ctrl->misc_control_0.raw = mchbar_read32(DDR_SCRAM_MISC_CONTROL);
+ ctrl->misc_control_0.weaklock_latency = 12;
+ ctrl->misc_control_0.wl_sleep_cycles = 5;
+ ctrl->misc_control_0.wl_wake_cycles = 2;
+ mchbar_write32(DDR_SCRAM_MISC_CONTROL, ctrl->misc_control_0.raw);
+ for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+ /* Keep scrambling disabled for training */
+ mchbar_write32(DDR_SCRAMBLE_ch(channel), 0);
+ }
+}
+
+/* Very weird, application-specific function */
+static void override_comp(uint32_t value, uint32_t width, uint32_t shift, uint32_t offset)
+{
+ const uint32_t mask = (1 << width) - 1;
+ uint32_t reg32 = mchbar_read32(offset);
+ reg32 &= ~(mask << shift);
+ reg32 |= (value << shift);
+ mchbar_write32(offset, reg32);
+}
+
+static void program_ls_comp(struct sysinfo *ctrl)
+{
+ /* Disable periodic COMP */
+ const union pcu_comp_reg m_comp = {
+ .comp_disable = 1,
+ .comp_interval = COMP_INT,
+ .comp_force = 1,
+ };
+ mchbar_write32(M_COMP, m_comp.raw);
+ udelay(10);
+
+ /* Override level shifter compensation */
+ const uint32_t ls_comp = 2;
+ override_comp(ls_comp, 3, 28, DDR_DATA_RCOMP_DATA_1);
+ override_comp(ls_comp, 3, 24, DDR_CMD_COMP);
+ override_comp(ls_comp, 3, 24, DDR_CKE_CTL_COMP);
+ override_comp(ls_comp, 3, 23, DDR_CLK_COMP);
+ override_comp(ls_comp, 3, 28, DDR_COMP_DATA_COMP_1);
+ override_comp(ls_comp, 3, 24, DDR_COMP_CMD_COMP);
+ override_comp(ls_comp, 4, 24, DDR_COMP_CTL_COMP);
+ override_comp(ls_comp, 4, 23, DDR_COMP_CLK_COMP);
+ override_comp(ls_comp, 3, 24, DDR_COMP_OVERRIDE);
+
+ /* Manually update the COMP values */
+ union ddr_scram_misc_control_reg ddr_scram_misc_ctrl = ctrl->misc_control_0;
+ ddr_scram_misc_ctrl.force_comp_update = 1;
+ mchbar_write32(DDR_SCRAM_MISC_CONTROL, ddr_scram_misc_ctrl.raw);
+
+ /* Use a fixed offset between ODT Up/Dn */
+ const union ddr_comp_data_comp_1_reg data_comp_1 = {
+ .raw = mchbar_read32(DDR_COMP_DATA_COMP_1),
+ };
+ const uint32_t odt_offset = data_comp_1.rcomp_odt_down - data_comp_1.rcomp_odt_up;
+ ctrl->comp_ctl_0.odt_up_down_off = odt_offset;
+ ctrl->comp_ctl_0.fixed_odt_offset = 1;
+ mchbar_write32(DDR_COMP_CTL_0, ctrl->comp_ctl_0.raw);
+}
+
+/** TODO: Deduplicate PCODE stuff, it's already implemented in CPU code **/
+static bool pcode_ready(void)
+{
+ const unsigned int delay_step = 10;
+ for (unsigned int i = 0; i < 1000; i += delay_step) {
+ if (!(mchbar_read32(BIOS_MAILBOX_INTERFACE) & MAILBOX_RUN_BUSY))
+ return true;
+
+ udelay(delay_step);
+ };
+ return false;
+}
+
+static uint32_t pcode_mailbox_read(const uint32_t command)
+{
+ if (!pcode_ready()) {
+ printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready\n");
+ return 0;
+ }
+ mchbar_write32(BIOS_MAILBOX_INTERFACE, command | MAILBOX_RUN_BUSY);
+ if (!pcode_ready()) {
+ printk(BIOS_ERR, "PCODE: mailbox timeout on completion\n");
+ return 0;
+ }
+ return mchbar_read32(BIOS_MAILBOX_DATA);
+}
+
+static int pcode_mailbox_write(const uint32_t command, const uint32_t data)
+{
+ if (!pcode_ready()) {
+ printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready\n");
+ return -1;
+ }
+ mchbar_write32(BIOS_MAILBOX_DATA, data);
+ mchbar_write32(BIOS_MAILBOX_INTERFACE, command | MAILBOX_RUN_BUSY);
+ if (!pcode_ready()) {
+ printk(BIOS_ERR, "PCODE: mailbox timeout on completion\n");
+ return -1;
+ }
+ return 0;
+}
+
+static void enable_2x_refresh(struct sysinfo *ctrl)
+{
+ if (!CONFIG(ENABLE_DDR_2X_REFRESH))
+ return;
+
+ printk(BIOS_DEBUG, "Enabling 2x Refresh\n");
+ const bool asr = ctrl->flags.asr;
+ const bool lpddr = ctrl->lpddr;
+
+ /* Mutually exclusive */
+ assert(!asr || !lpddr);
+ if (!asr) {
+ uint32_t reg32 = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_DDR_2X_REFRESH);
+ if (!(reg32 & BIT(31))) { /** TODO: What to do if this is locked? **/
+ reg32 |= BIT(0); /* Enable 2x refresh */
+ reg32 |= BIT(31); /* Lock */
+
+ if (lpddr)
+ reg32 |= 4 << 1; /* LPDDR MR4 1/2 tREFI */
+
+ if (pcode_mailbox_write(MAILBOX_BIOS_CMD_WRITE_DDR_2X_REFRESH, reg32))
+ printk(BIOS_ERR, "Could not enable Mailbox 2x Refresh\n");
+ }
+ if (!lpddr)
+ return;
+ }
+ assert(asr || lpddr);
+ uint16_t refi_reduction = 50;
+ if (lpddr) {
+ refi_reduction = 97;
+ mchbar_clrbits32(PCU_DDR_PTM_CTL, 1 << 7); /* DISABLE_DRAM_TS */
+ }
+ /** TODO: Remember why this is only done on cold boots **/
+ if (ctrl->bootmode == BOOTMODE_COLD) {
+ ctrl->tREFI *= refi_reduction;
+ ctrl->tREFI /= 100;
+ }
+}
+
+static void set_pcu_ddr_voltage(const uint16_t vdd_mv)
+{
+ /** TODO: Handle other voltages? **/
+ uint32_t pcu_ddr_voltage;
+ switch (vdd_mv) {
+ case 1200:
+ pcu_ddr_voltage = 3;
+ break;
+ case 1350:
+ pcu_ddr_voltage = 1;
+ break;
+ default:
+ case 1500:
+ pcu_ddr_voltage = 0;
+ break;
+ }
+ /* Set bits 0..2 */
+ mchbar_write32(PCU_DDR_VOLTAGE, pcu_ddr_voltage);
+}
+
+static void program_scheduler(struct sysinfo *ctrl)
+{
+ /*
+ * ZQ calibration needs to be serialized for LPDDR3. Otherwise,
+ * the processor issues LPDDR3 ZQ calibration in parallel when
+ * exiting Package C7 or deeper. This causes problems for dual
+ * and quad die packages since all ranks share the same ZQ pin.
+ *
+ * Erratum HSM94: LPDDR3 ZQ Calibration Following Deep Package
+ * C-state Exit May Lead to Unpredictable System Behavior
+ */
+ const union mcscheds_cbit_reg mcscheds_cbit = {
+ .dis_write_gap = 1,
+ .dis_odt = is_hsw_ult() && !(ctrl->lpddr && ctrl->lpddr_dram_odt),
+ .serialize_zq = ctrl->lpddr,
+ };
+ mchbar_write32(MCSCHEDS_CBIT, mcscheds_cbit.raw);
+ mchbar_write32(MCMNTS_SC_WDBWM, 0x553c3038);
+ if (ctrl->lpddr) {
+ for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+ if (!does_ch_exist(ctrl, channel))
+ continue;
+
+ union mcmain_command_rate_limit_reg cmd_rate_limit = {
+ .raw = mchbar_read32(COMMAND_RATE_LIMIT_ch(channel)),
+ };
+ cmd_rate_limit.enable_cmd_limit = 1;
+ cmd_rate_limit.cmd_rate_limit = 3;
+ mchbar_write32(COMMAND_RATE_LIMIT_ch(channel), cmd_rate_limit.raw);
+ }
+ }
+}
+
+static uint8_t biggest_channel(const struct sysinfo *const ctrl)
+{
+ _Static_assert(NUM_CHANNELS == 2, "Code assumes exactly two channels");
+ return !!(ctrl->channel_size_mb[0] < ctrl->channel_size_mb[1]);
+}
+
+static void dram_zones(struct sysinfo *ctrl)
+{
+ /** TODO: Activate channel hash here, if enabled **/
+ const uint8_t biggest = biggest_channel(ctrl);
+ const uint8_t smaller = !biggest;
+
+ /** TODO: Use stacked mode if Memory Trace is enabled **/
+ const union mad_chnl_reg mad_channel = {
+ .ch_a = biggest,
+ .ch_b = smaller,
+ .ch_c = 2,
+ .lpddr_mode = ctrl->lpddr,
+ };
+ mchbar_write32(MAD_CHNL, mad_channel.raw);
+
+ const uint8_t channel_b_zone_size = ctrl->channel_size_mb[smaller] / 256;
+ const union mad_zr_reg mad_zr = {
+ .ch_b_double = channel_b_zone_size * 2,
+ .ch_b_single = channel_b_zone_size,
+ };
+ mchbar_write32(MAD_ZR, mad_zr.raw);
+}
+
+static uint8_t biggest_dimm(const struct raminit_dimm_info *dimms)
+{
+ _Static_assert(NUM_SLOTS <= 2, "Code assumes at most two DIMMs per channel.");
+ if (NUM_SLOTS == 1)
+ return 0;
+
+ return !!(dimms[0].data.size_mb < dimms[1].data.size_mb);
+}
+
+static void dram_dimm_mapping(struct sysinfo *ctrl)
+{
+ for (uint8_t channel = 0; channel < NUM_CHANNELS; channel++) {
+ if (!does_ch_exist(ctrl, channel)) {
+ const union mad_dimm_reg mad_dimm = {
+ .rank_interleave = 1,
+ .enh_interleave = 1,
+ };
+ mchbar_write32(MAD_DIMM(channel), mad_dimm.raw);
+ continue;
+ }
+ const uint8_t biggest = biggest_dimm(ctrl->dimms[channel]);
+ const uint8_t smaller = !biggest;
+ const struct dimm_attr_ddr3_st *dimm_a = &ctrl->dimms[channel][biggest].data;
+ const struct dimm_attr_ddr3_st *dimm_b = &ctrl->dimms[channel][smaller].data;
+ union mad_dimm_reg mad_dimm = {
+ .dimm_a_size = dimm_a->size_mb / 256,
+ .dimm_b_size = dimm_b->size_mb / 256,
+ .dimm_a_sel = biggest,
+ .dimm_a_ranks = dimm_a->ranks == 2,
+ .dimm_b_ranks = dimm_b->ranks == 2,
+ .dimm_a_width = dimm_a->width == 16,
+ .dimm_b_width = dimm_b->width == 16,
+ .rank_interleave = 1,
+ .enh_interleave = 1,
+ .ecc_mode = 0, /* Do not enable ECC yet */
+ };
+ if (is_hsw_ult())
+ mad_dimm.dimm_b_width = mad_dimm.dimm_a_width;
+
+ mchbar_write32(MAD_DIMM(channel), mad_dimm.raw);
+ if (ctrl->lpddr)
+ die("%s: Missing LPDDR support (LPDDR_MR_PARAMS)\n", __func__);
+ }
+}
+
+enum raminit_status configure_mc(struct sysinfo *ctrl)
+{
+ const uint16_t vccio_mv = 1000;
+ const uint16_t vsshi_mv = ctrl->vdd_mv - 950;
+ const bool dis_odt_static = is_hsw_ult(); /* Disable static ODT legs on ULT */
+ const bool vddhi = ctrl->vdd_mv > 1350;
+
+ program_misc_control(ctrl);
+ program_mrc_revision();
+ program_ranks_used(ctrl);
+ program_ddr_data(ctrl, dis_odt_static, vddhi);
+ program_vsshi_control(ctrl, vsshi_mv);
+ program_dimm_vref(ctrl, vccio_mv, vddhi);
+ program_ddr_ca(ctrl, vddhi);
+ program_rcomp_vref(ctrl, dis_odt_static);
+ program_slew_rates(ctrl, vddhi);
+ program_vsshi(ctrl, vccio_mv, vsshi_mv);
+ program_misc(ctrl);
+ program_ls_comp(ctrl);
+ enable_2x_refresh(ctrl);
+ set_pcu_ddr_voltage(ctrl->vdd_mv);
+ configure_timings(ctrl);
+ configure_refresh(ctrl);
+ program_scheduler(ctrl);
+ dram_zones(ctrl);
+ dram_dimm_mapping(ctrl);
+
+ return RAMINIT_STATUS_SUCCESS;
+}
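
For reference, the row selection in get_rx_bias() above can be reproduced outside the MRC. The following standalone sketch (not part of the patch; the inputs in main() are illustrative) mirrors the arithmetic: the table row is the DCLK multiplier minus a reference multiplier (4 for a 133 MHz base clock, 6 for 100 MHz), clamped to the table size, while the column in the real code comes from the DDR_CLK_CB_STATUS feedback bits.

/* Hedged sketch: reproduces the row-selection arithmetic of get_rx_bias()
 * with hypothetical inputs; the real code uses clamp_s8() and MCHBAR reads. */
#include <stdint.h>
#include <stdio.h>

static uint8_t rx_bias_row(uint32_t base_freq, uint32_t multiplier, uint8_t num_rows)
{
	const uint8_t ref_clk = (base_freq == 133) ? 4 : 6;	/* lowest supported multiplier */
	int32_t row = (int32_t)multiplier - ref_clk;
	if (row < 0)						/* clamp like clamp_s8(0, ..., max) */
		row = 0;
	if (row > num_rows - 1)
		row = num_rows - 1;
	return (uint8_t)row;
}

int main(void)
{
	/* DDR3-1600 is an 800 MHz memory clock: multiplier 6 on a 133 MHz base,
	 * or multiplier 8 on a 100 MHz base; both select row 2, the 1600 MT/s row. */
	printf("row = %u\n", rx_bias_row(133, 6, 5));
	printf("row = %u\n", rx_bias_row(100, 8, 5));
	return 0;
}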
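Similarly, the Rcomp targets in program_rcomp_vref() come from steps * (R - Rtarget) / (2 * (R + Rtarget)), clamped to the signed width of the destination field (5 bits for DqOdt, 4 bits otherwise). A minimal standalone check with one worked value (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static int8_t rcomp_vref(int32_t steps, int32_t resistor, int32_t target_r, int32_t field_bits)
{
	const int32_t limit = 1 << (field_bits - 1);	/* 8 for a 4-bit signed field */
	int32_t vref = (steps * (resistor - target_r)) / (2 * (resistor + target_r));
	if (vref < -limit)
		vref = -limit;
	if (vref > limit - 1)
		vref = limit - 1;
	return (int8_t)vref;
}

int main(void)
{
	/* Non-ULT DQ write drive strength: 25 ohm comp resistor, 33 ohm target,
	 * 64 steps, 4-bit signed field -> 64 * (25 - 33) / (2 * 58) = -4 */
	printf("DQ drive vref = %d\n", rcomp_vref(64, 25, 33, 4));
	return 0;
}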
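The slew-rate codes in program_slew_rates() divide the quadrature clock period by the per-stage buffer delay and then lock the DLL either to a phase (stages / 2 - 1) or to a full cycle ((stages - 1) | BIT(4)). A hedged standalone sketch of that per-group computation; the 625 ps qclkps value passed in main() is purely an assumed illustration, not a value taken from the patch:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, y)	(((x) + (y) / 2) / (y))
#define BIT(x)			(1u << (x))

/* Mirrors the per-signal-group loop body of program_slew_rates(). */
static uint16_t scomp_code(uint32_t qclkps, uint32_t stage_delay_ps, uint32_t min_cycle_ps)
{
	uint32_t stages = DIV_ROUND_CLOSEST(qclkps, stage_delay_ps);
	if (stages < 5)
		stages = 5;

	const int dll_phase_lock = stage_delay_ps < min_cycle_ps || stages > 16;
	if (dll_phase_lock)
		return stages / 2 - 1;		/* lock DLL to a phase */
	else
		return (stages - 1) | BIT(4);	/* lock DLL to a cycle */
}

int main(void)
{
	/* Example: DQ group (59 ps per stage, 46 ps minimum) with an assumed
	 * 625 ps quadrature clock period -> 11 stages -> cycle lock, code 0x1a. */
	printf("DQ scomp code = 0x%02x\n", scomp_code(625, 59, 46));
	return 0;
}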
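The ln_x100() helper above is a fixed-point natural logarithm with both input and result scaled by 100: values above 100*e are repeatedly divided by e (the *1000/2718 step) while 100 is added per division, and the remainder in [100, 271] goes through what appears to be a quadratic fit of 100*ln(v/100). A standalone comparison against libm (illustrative, not part of the patch; link with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Same fixed-point approximation as ln_x100() in configure_mc.c */
static uint32_t ln_x100(uint32_t input_x100)
{
	uint32_t val = input_x100;
	uint32_t ret = 0;
	while (val > 271) {			/* divide by e until val/100 < e */
		val = (val * 1000) / 2718;
		ret += 100;
	}
	/* quadratic fit of 100 * ln(val / 100) on [100, 271] */
	return ret + (-16 * val * val + 11578 * val - 978860) / 10000;
}

int main(void)
{
	/* Errors stay within a couple of counts over this range. */
	for (uint32_t x = 200; x <= 1000; x += 200)
		printf("ln_x100(%u) = %u, 100*ln(%g) = %.1f\n",
		       x, ln_x100(x), x / 100.0, 100.0 * log(x / 100.0));
	return 0;
}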
diff --git a/src/northbridge/intel/haswell/native_raminit/raminit_main.c b/src/northbridge/intel/haswell/native_raminit/raminit_main.c
index 2fea658415..fcc981ad04 100644
--- a/src/northbridge/intel/haswell/native_raminit/raminit_main.c
+++ b/src/northbridge/intel/haswell/native_raminit/raminit_main.c
@@ -22,6 +22,7 @@ static const struct task_entry cold_boot[] = {
{ collect_spd_info, true, "PROCSPD", },
{ initialise_mpll, true, "INITMPLL", },
{ convert_timings, true, "CONVTIM", },
+ { configure_mc, true, "CONFMC", },
};
/* Return a generic stepping value to make stepping checks simpler */
@@ -53,6 +54,7 @@ static void initialize_ctrl(struct sysinfo *ctrl)
ctrl->cpu = cpu_get_cpuid();
ctrl->stepping = get_stepping(ctrl->cpu);
+ ctrl->vdd_mv = is_hsw_ult() ? 1350 : 1500; /** FIXME: Hardcoded, does it matter? **/
ctrl->dq_pins_interleaved = cfg->dq_pins_interleaved;
ctrl->bootmode = bootmode;
}
diff --git a/src/northbridge/intel/haswell/native_raminit/raminit_native.h b/src/northbridge/intel/haswell/native_raminit/raminit_native.h
index e0ebd3a2a7..fffa6d5450 100644
--- a/src/northbridge/intel/haswell/native_raminit/raminit_native.h
+++ b/src/northbridge/intel/haswell/native_raminit/raminit_native.h
@@ -3,16 +3,41 @@
#ifndef HASWELL_RAMINIT_NATIVE_H
#define HASWELL_RAMINIT_NATIVE_H
+#include <assert.h>
#include <device/dram/ddr3.h>
#include <northbridge/intel/haswell/haswell.h>
+#include <string.h>
+#include <types.h>
+
+#include "reg_structs.h"
/** TODO (Angel): Remove this after in-review patches are submitted **/
#define SPD_LEN SPD_SIZE_MAX_DDR3
+/* Each channel has 4 ranks, spread across 2 slots */
+#define NUM_SLOTRANKS 4
+
+#define NUM_GROUPS 2
+
/* 8 data lanes + 1 ECC lane */
#define NUM_LANES 9
#define NUM_LANES_NO_ECC 8
+#define COMP_INT 10
+
+/* Always use 12 legs for emphasis (not trained) */
+#define TXEQFULLDRV (3 << 4)
+
+enum command_training_iteration {
+ CT_ITERATION_CLOCK = 0,
+ CT_ITERATION_CMD_NORTH,
+ CT_ITERATION_CMD_SOUTH,
+ CT_ITERATION_CKE,
+ CT_ITERATION_CTL,
+ CT_ITERATION_CMD_VREF,
+ MAX_CT_ITERATION,
+};
+
enum raminit_boot_mode {
BOOTMODE_COLD,
BOOTMODE_WARM,
@@ -58,6 +83,9 @@ struct sysinfo {
* LPDDR-specific functions have stubs which will halt upon execution.
*/
bool lpddr;
+ bool lpddr_dram_odt;
+ uint8_t lpddr_cke_rank_map[NUM_CHANNELS];
+ uint8_t dq_byte_map[NUM_CHANNELS][MAX_CT_ITERATION][2];
struct raminit_dimm_info dimms[NUM_CHANNELS][NUM_SLOTS];
union dimm_flags_ddr3_st flags;
@@ -94,16 +122,89 @@ struct sysinfo {
uint32_t mem_clock_mhz;
uint32_t mem_clock_fs; /* Memory clock period in femtoseconds */
uint32_t qclkps; /* Quadrature clock period in picoseconds */
+
+ uint16_t vdd_mv;
+
+ union ddr_scram_misc_control_reg misc_control_0;
+
+ union ddr_comp_ctl_0_reg comp_ctl_0;
+ union ddr_comp_ctl_1_reg comp_ctl_1;
+
+ union ddr_data_vref_adjust_reg dimm_vref;
+
+ uint32_t data_offset_train[NUM_CHANNELS][NUM_LANES];
+ uint32_t data_offset_comp[NUM_CHANNELS][NUM_LANES];
+
+ uint32_t dq_control_0[NUM_CHANNELS];
+ uint32_t dq_control_1[NUM_CHANNELS][NUM_LANES];
+ uint32_t dq_control_2[NUM_CHANNELS][NUM_LANES];
+
+ uint16_t tx_dq[NUM_CHANNELS][NUM_SLOTRANKS][NUM_LANES];
+ uint16_t txdqs[NUM_CHANNELS][NUM_SLOTRANKS][NUM_LANES];
+ uint8_t tx_eq[NUM_CHANNELS][NUM_SLOTRANKS][NUM_LANES];
+
+ uint16_t rcven[NUM_CHANNELS][NUM_SLOTRANKS][NUM_LANES];
+ uint8_t rx_eq[NUM_CHANNELS][NUM_SLOTRANKS][NUM_LANES];
+ uint8_t rxdqsp[NUM_CHANNELS][NUM_SLOTRANKS][NUM_LANES];
+ uint8_t rxdqsn[NUM_CHANNELS][NUM_SLOTRANKS][NUM_LANES];
+ int8_t rxvref[NUM_CHANNELS][NUM_SLOTRANKS][NUM_LANES];
+
+ uint8_t clk_pi_code[NUM_CHANNELS][NUM_SLOTRANKS];
+ uint8_t ctl_pi_code[NUM_CHANNELS][NUM_SLOTRANKS];
+ uint8_t cke_pi_code[NUM_CHANNELS][NUM_SLOTRANKS];
+
+ uint8_t cke_cmd_pi_code[NUM_CHANNELS][NUM_GROUPS];
+ uint8_t cmd_north_pi_code[NUM_CHANNELS][NUM_GROUPS];
+ uint8_t cmd_south_pi_code[NUM_CHANNELS][NUM_GROUPS];
};
+static inline bool is_hsw_ult(void)
+{
+ return CONFIG(INTEL_LYNXPOINT_LP);
+}
+
+static inline bool rank_in_mask(uint8_t rank, uint8_t rankmask)
+{
+ assert(rank < NUM_SLOTRANKS);
+ return !!(BIT(rank) & rankmask);
+}
+
+static inline bool does_ch_exist(const struct sysinfo *ctrl, uint8_t channel)
+{
+ return !!ctrl->dpc[channel];
+}
+
+static inline bool does_rank_exist(const struct sysinfo *ctrl, uint8_t rank)
+{
+ return rank_in_mask(rank, ctrl->rankmap[0] | ctrl->rankmap[1]);
+}
+
+static inline bool rank_in_ch(const struct sysinfo *ctrl, uint8_t rank, uint8_t channel)
+{
+ assert(channel < NUM_CHANNELS);
+ return rank_in_mask(rank, ctrl->rankmap[channel]);
+}
+
+/** TODO: Handling of data_offset_train could be improved, also coupled with reg updates **/
+static inline void clear_data_offset_train_all(struct sysinfo *ctrl)
+{
+ memset(ctrl->data_offset_train, 0, sizeof(ctrl->data_offset_train));
+}
+
void raminit_main(enum raminit_boot_mode bootmode);
enum raminit_status collect_spd_info(struct sysinfo *ctrl);
enum raminit_status initialise_mpll(struct sysinfo *ctrl);
enum raminit_status convert_timings(struct sysinfo *ctrl);
+enum raminit_status configure_mc(struct sysinfo *ctrl);
+
+void configure_timings(struct sysinfo *ctrl);
+void configure_refresh(struct sysinfo *ctrl);
enum raminit_status wait_for_first_rcomp(void);
+uint8_t get_rx_bias(const struct sysinfo *ctrl);
+
uint8_t get_tCWL(uint32_t mem_clock_mhz);
uint32_t get_tREFI(uint32_t mem_clock_mhz);
uint32_t get_tXP(uint32_t mem_clock_mhz);
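
The reg_structs.h file added below models each MCHBAR register as a union of a packed bit-field struct and a raw uint32_t, which is how configure_mc.c composes register values before writing them. A minimal sketch of that pattern with a made-up register layout (the names here are illustrative, not taken from the patch; bit-field ordering assumes the little-endian GCC/Clang targets coreboot builds for):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: a fake two-field register in the style of reg_structs.h */
union example_ctl_reg {
	struct __attribute__((packed)) {
		uint32_t enable  :  1;	// Bits 0:0
		uint32_t divider :  3;	// Bits 3:1
		uint32_t         : 28;	// Bits 31:4
	};
	uint32_t raw;
};

int main(void)
{
	/* Designated initializers fill the bit-fields; .raw is what the real
	 * code hands to mchbar_write32(). */
	const union example_ctl_reg reg = {
		.enable  = 1,
		.divider = 5,
	};
	printf("raw = 0x%08x\n", reg.raw);	/* prints 0x0000000b */
	return 0;
}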
diff --git a/src/northbridge/intel/haswell/native_raminit/reg_structs.h b/src/northbridge/intel/haswell/native_raminit/reg_structs.h
new file mode 100644
index 0000000000..d11cda4b3d
--- /dev/null
+++ b/src/northbridge/intel/haswell/native_raminit/reg_structs.h
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef HASWELL_RAMINIT_REG_STRUCTS_H
+#define HASWELL_RAMINIT_REG_STRUCTS_H
+
+union ddr_data_rx_train_rank_reg {
+ struct __packed {
+ uint32_t rcven : 9; // Bits 8:0
+ uint32_t dqs_p : 6; // Bits 14:9
+ uint32_t rx_eq : 5; // Bits 19:15
+ uint32_t dqs_n : 6; // Bits 25:20
+ int32_t vref : 6; // Bits 31:26
+ };
+ uint32_t raw;
+};
+
+union ddr_data_tx_train_rank_reg {
+ struct __packed {
+ uint32_t dq_delay : 9; // Bits 8:0
+ uint32_t dqs_delay : 9; // Bits 17:9
+ uint32_t : 2; // Bits 19:18
+ uint32_t tx_eq : 6; // Bits 25:20
+ uint32_t : 6; // Bits 31:26
+ };
+ uint32_t raw;
+};
+
+union ddr_data_control_0_reg {
+ struct __packed {
+ uint32_t rx_training_mode : 1; // Bits 0:0
+ uint32_t wl_training_mode : 1; // Bits 1:1
+ uint32_t rl_training_mode : 1; // Bits 2:2
+ uint32_t samp_train_mode : 1; // Bits 3:3
+ uint32_t tx_on : 1; // Bits 4:4
+ uint32_t rf_on : 1; // Bits 5:5
+ uint32_t rx_pi_on : 1; // Bits 6:6
+ uint32_t tx_pi_on : 1; // Bits 7:7
+ uint32_t internal_clocks_on : 1; // Bits 8:8
+ uint32_t repeater_clocks_on : 1; // Bits 9:9
+ uint32_t tx_disable : 1; // Bits 10:10
+ uint32_t rx_disable : 1; // Bits 11:11
+ uint32_t tx_long : 1; // Bits 12:12
+ uint32_t rx_dqs_ctle : 2; // Bits 14:13
+ uint32_t rx_read_pointer : 3; // Bits 17:15
+ uint32_t driver_segment_enable : 1; // Bits 18:18
+ uint32_t data_vccddq_hi : 1; // Bits 19:19
+ uint32_t read_rf_rd : 1; // Bits 20:20
+ uint32_t read_rf_wr : 1; // Bits 21:21
+ uint32_t read_rf_rank : 2; // Bits 23:22
+ uint32_t force_odt_on : 1; // Bits 24:24
+ uint32_t odt_samp_off : 1; // Bits 25:25
+ uint32_t disable_odt_static : 1; // Bits 26:26
+ uint32_t ddr_cr_force_odt_on : 1; // Bits 27:27
+ uint32_t lpddr_mode : 1; // Bits 28:28
+ uint32_t en_read_preamble : 1; // Bits 29:29
+ uint32_t odt_samp_extend_en : 1; // Bits 30:30
+ uint32_t early_rleak_en : 1; // Bits 31:31
+ };
+ uint32_t raw;
+};
+
+union ddr_data_control_1_reg {
+ struct __packed {
+ int32_t ref_pi : 4; // Bits 3:0
+ uint32_t dll_mask : 2; // Bits 5:4
+ uint32_t dll_weaklock : 1; // Bits 6:6
+ uint32_t sdll_segment_disable : 3; // Bits 9:7
+ uint32_t rx_bias_ctl : 3; // Bits 12:10
+ int32_t odt_delay : 4; // Bits 16:13
+ uint32_t odt_duration : 3; // Bits 19:17
+ int32_t sense_amp_delay : 4; // Bits 23:20
+ uint32_t sense_amp_duration : 3; // Bits 26:24
+ uint32_t burst_end_odt_delay : 3; // Bits 29:27 *** TODO: Check Broadwell ***
+ uint32_t lpddr_long_odt_en : 1; // Bits 30:30
+ uint32_t : 1; // Bits 31:31
+ };
+ uint32_t raw;
+};
+
+/* NOTE: Bits 31:19 are only valid for Broadwell onwards */
+union ddr_data_control_2_reg {
+ struct __packed {
+ uint32_t rx_stagger_ctl : 5; // Bits 4:0
+ uint32_t force_bias_on : 1; // Bits 5:5
+ uint32_t force_rx_on : 1; // Bits 6:6
+ uint32_t leaker_comp : 2; // Bits 8:7
+ uint32_t rx_dqs_amp_offset : 4; // Bits 12:9
+ uint32_t rx_clk_stg_num : 5; // Bits 17:13
+ uint32_t wl_long_delay : 1; // Bits 18:18
+ uint32_t enable_vref_pwrdn : 1; // Bits 19:19
+ uint32_t ddr4_mode : 1; // Bits 20:20
+ uint32_t en_vddq_odt : 1; // Bits 21:21
+ uint32_t en_vtt_odt : 1; // Bits 22:22
+ uint32_t en_const_z_eq_tx : 1; // Bits 23:23
+ uint32_t tx_eq_dis : 1; // Bits 24:24
+ uint32_t rx_vref_prog_mfc : 1; // Bits 25:25
+ uint32_t cben : 3; // Bits 28:26
+ uint32_t tx_deskew_disable : 1; // Bits 29:29
+ uint32_t rx_deskew_disable : 1; // Bits 30:30
+ uint32_t dq_slew_dly_byp : 1; // Bits 31:31
+ };
+ uint32_t raw;
+};
+
+union ddr_comp_data_comp_1_reg {
+ struct __packed {
+ uint32_t rcomp_odt_up : 6; // Bits 5:0
+ uint32_t : 3; // Bits 8:6
+ uint32_t rcomp_odt_down : 6; // Bits 14:9
+ uint32_t : 1; // Bits 15:15
+ uint32_t panic_drv_down : 6; // Bits 21:16
+ uint32_t panic_drv_up : 6; // Bits 27:22
+ uint32_t ls_comp : 3; // Bits 30:28
+ uint32_t : 1; // Bits 31:31
+ };
+ uint32_t raw;
+};
+
+union ddr_comp_ctl_0_reg {
+ struct __packed {
+ uint32_t : 3; // Bits 2:0
+ uint32_t disable_odt_static : 1; // Bits 3:3
+ uint32_t odt_up_down_off : 6; // Bits 9:4
+ uint32_t fixed_odt_offset : 1; // Bits 10:10
+ int32_t dq_drv_vref : 4; // Bits 14:11
+ int32_t dq_odt_vref : 5; // Bits 19:15
+ int32_t cmd_drv_vref : 4; // Bits 23:20
+ int32_t ctl_drv_vref : 4; // Bits 27:24
+ int32_t clk_drv_vref : 4; // Bits 31:28
+ };
+ uint32_t raw;
+};
+
+union ddr_comp_ctl_1_reg {
+ struct __packed {
+ uint32_t dq_scomp : 5; // Bits 4:0
+ uint32_t cmd_scomp : 5; // Bits 9:5
+ uint32_t ctl_scomp : 5; // Bits 14:10
+ uint32_t clk_scomp : 5; // Bits 19:15
+ uint32_t tco_cmd_offset : 4; // Bits 23:20
+ uint32_t comp_clk_on : 1; // Bits 24:24
+ uint32_t vccddq_hi : 1; // Bits 25:25
+ uint32_t : 3; // Bits 28:26
+ uint32_t dis_quick_comp : 1; // Bits 29:29
+ uint32_t sin_step : 1; // Bits 30:30
+ uint32_t sin_step_adv : 1; // Bits 31:31
+ };
+ uint32_t raw;
+};
+
+union ddr_data_vref_adjust_reg {
+ struct __packed {
+ int32_t ca_vref_ctrl : 7; // Bits 6:0
+ int32_t ch1_vref_ctrl : 7; // Bits 13:7
+ int32_t ch0_vref_ctrl : 7; // Bits 20:14
+ uint32_t en_dimm_vref_ca : 1; // Bits 21:21
+ uint32_t en_dimm_vref_ch1 : 1; // Bits 22:22
+ uint32_t en_dimm_vref_ch0 : 1; // Bits 23:23
+ uint32_t hi_z_timer_ctrl : 2; // Bits 25:24
+ uint32_t vccddq_hi_qnnn_h : 1; // Bits 26:26
+ uint32_t : 2; // Bits 28:27
+ uint32_t ca_slow_bw : 1; // Bits 29:29
+ uint32_t ch0_slow_bw : 1; // Bits 30:30
+ uint32_t ch1_slow_bw : 1; // Bits 31:31
+ };
+ uint32_t raw;
+};
+
+union ddr_data_vref_control_reg {
+ struct __packed {
+ uint32_t hi_bw_divider : 2; // Bits 1:0
+ uint32_t lo_bw_divider : 2; // Bits 3:2
+ uint32_t sample_divider : 3; // Bits 6:4
+ uint32_t open_loop : 1; // Bits 7:7
+ uint32_t slow_bw_error : 2; // Bits 9:8
+ uint32_t hi_bw_enable : 1; // Bits 10:10
+ uint32_t : 1; // Bits 11:11
+ uint32_t vt_slope_b : 3; // Bits 14:12
+ uint32_t vt_slope_a : 3; // Bits 17:15
+ uint32_t vt_offset : 3; // Bits 20:18
+ uint32_t sel_code : 3; // Bits 23:21
+ uint32_t output_code : 8; // Bits 31:24
+ };
+ uint32_t raw;
+};
+
+union ddr_comp_vsshi_reg {
+ struct __packed {
+ uint32_t panic_drv_down_vref : 6; // Bits 5:0
+ uint32_t panic_drv_up_vref : 6; // Bits 11:6
+ uint32_t vt_offset : 5; // Bits 16:12
+ uint32_t vt_slope_a : 3; // Bits 19:17
+ uint32_t vt_slope_b : 3; // Bits 22:20
+ uint32_t : 9; // Bits 31:23
+ };
+ uint32_t raw;
+};
+
+union ddr_comp_vsshi_control_reg {
+ struct __packed {
+ uint32_t vsshi_target : 6; // Bits 5:0
+ uint32_t hi_bw_divider : 2; // Bits 7:6
+ uint32_t lo_bw_divider : 2; // Bits 9:8
+ uint32_t sample_divider : 3; // Bits 12:10
+ uint32_t open_loop : 1; // Bits 13:13
+ uint32_t bw_error : 2; // Bits 15:14
+ uint32_t panic_driver_en : 1; // Bits 16:16
+ uint32_t : 1; // Bits 17:17
+ uint32_t panic_voltage : 4; // Bits 21:18
+ uint32_t gain_boost : 1; // Bits 22:22
+ uint32_t sel_code : 1; // Bits 23:23
+ uint32_t output_code : 8; // Bits 31:24
+ };
+ uint32_t raw;
+};
+
+union ddr_clk_controls_reg {
+ struct __packed {
+ uint32_t ref_pi : 4; // Bits 3:0
+ uint32_t dll_mask : 2; // Bits 5:4
+ uint32_t : 1; // Bits 6:6
+ uint32_t tx_on : 1; // Bits 7:7
+ uint32_t internal_clocks_on : 1; // Bits 8:8
+ uint32_t repeater_clocks_on : 1; // Bits 9:9
+ uint32_t io_lb_ctl : 2; // Bits 11:10
+ uint32_t odt_mode : 1; // Bits 12:12
+ uint32_t : 8; // Bits 20:13
+ uint32_t rx_vref : 6; // Bits 26:21
+ uint32_t vccddq_hi : 1; // Bits 27:27
+ uint32_t dll_weaklock : 1; // Bits 28:28
+ uint32_t lpddr_mode : 1; // Bits 29:29
+ uint32_t : 2; // Bits 31:30
+ };
+ uint32_t raw;
+};
+
+union ddr_cmd_controls_reg {
+ struct __packed {
+ int32_t ref_pi : 4; // Bits 3:0
+ uint32_t dll_mask : 2; // Bits 5:4
+ uint32_t : 1; // Bits 6:6
+ uint32_t tx_on : 1; // Bits 7:7
+ uint32_t internal_clocks_on : 1; // Bits 8:8
+ uint32_t repeater_clocks_on : 1; // Bits 9:9
+ uint32_t io_lb_ctl : 2; // Bits 11:10
+ uint32_t odt_mode : 1; // Bits 12:12
+ uint32_t cmd_tx_eq : 2; // Bits 14:13
+ uint32_t early_weak_drive : 2; // Bits 16:15
+ uint32_t : 4; // Bits 20:17
+ int32_t rx_vref : 6; // Bits 26:21
+ uint32_t vccddq_hi : 1; // Bits 27:27
+ uint32_t dll_weaklock : 1; // Bits 28:28
+ uint32_t lpddr_mode : 1; // Bits 29:29
+ uint32_t lpddr_ca_a_dis : 1; // Bits 30:30
+ uint32_t lpddr_ca_b_dis : 1; // Bits 31:31
+ };
+ uint32_t raw;
+};
+
+/* Same register definition for CKE and CTL fubs */
+union ddr_cke_ctl_controls_reg {
+ struct __packed {
+ int32_t ref_pi : 4; // Bits 3:0
+ uint32_t dll_mask : 2; // Bits 5:4
+ uint32_t : 1; // Bits 6:6
+ uint32_t tx_on : 1; // Bits 7:7
+ uint32_t internal_clocks_on : 1; // Bits 8:8
+ uint32_t repeater_clocks_on : 1; // Bits 9:9
+ uint32_t io_lb_ctl : 2; // Bits 11:10
+ uint32_t odt_mode : 1; // Bits 12:12
+ uint32_t cmd_tx_eq : 2; // Bits 14:13
+ uint32_t early_weak_drive : 2; // Bits 16:15
+ uint32_t ctl_tx_eq : 2; // Bits 18:17
+ uint32_t ctl_sr_drv : 2; // Bits 20:19
+ int32_t rx_vref : 6; // Bits 26:21
+ uint32_t vccddq_hi : 1; // Bits 27:27
+ uint32_t dll_weaklock : 1; // Bits 28:28
+ uint32_t lpddr_mode : 1; // Bits 29:29
+ uint32_t la_drv_en_ovrd : 1; // Bits 30:30
+ uint32_t lpddr_ca_a_dis : 1; // Bits 31:31
+ };
+ uint32_t raw;
+};
+
+union ddr_scram_misc_control_reg {
+ struct __packed {
+ uint32_t wl_wake_cycles : 2; // Bits 1:0
+ uint32_t wl_sleep_cycles : 3; // Bits 4:2
+ uint32_t force_comp_update : 1; // Bits 5:5
+ uint32_t weaklock_latency : 4; // Bits 9:6
+ uint32_t ddr_no_ch_interleave : 1; // Bits 10:10
+ uint32_t lpddr_mode : 1; // Bits 11:11
+ uint32_t cke_mapping_ch0 : 4; // Bits 15:12
+ uint32_t cke_mapping_ch1 : 4; // Bits 19:16
+ uint32_t : 12; // Bits 31:20
+ };
+ uint32_t raw;
+};
+
+union mcscheds_cbit_reg {
+ struct __packed {
+ uint32_t dis_opp_cas : 1; // Bits 0:0
+ uint32_t dis_opp_is_cas : 1; // Bits 1:1
+ uint32_t dis_opp_ras : 1; // Bits 2:2
+ uint32_t dis_opp_is_ras : 1; // Bits 3:3
+ uint32_t dis_1c_byp : 1; // Bits 4:4
+ uint32_t dis_2c_byp : 1; // Bits 5:5
+ uint32_t dis_deprd_opt : 1; // Bits 6:6
+ uint32_t dis_pt_it : 1; // Bits 7:7
+ uint32_t dis_prcnt_ring : 1; // Bits 8:8
+ uint32_t dis_prcnt_sa : 1; // Bits 9:9
+ uint32_t dis_blkr_ph : 1; // Bits 10:10
+ uint32_t dis_blkr_pe : 1; // Bits 11:11
+ uint32_t dis_blkr_pm : 1; // Bits 12:12
+ uint32_t dis_odt : 1; // Bits 13:13
+ uint32_t oe_always_off : 1; // Bits 14:14
+ uint32_t : 1; // Bits 15:15
+ uint32_t dis_aom : 1; // Bits 16:16
+ uint32_t block_rpq : 1; // Bits 17:17
+ uint32_t block_wpq : 1; // Bits 18:18
+ uint32_t invert_align : 1; // Bits 19:19
+ uint32_t dis_write_gap : 1; // Bits 20:20
+ uint32_t dis_zq : 1; // Bits 21:21
+ uint32_t dis_tt : 1; // Bits 22:22
+ uint32_t dis_opp_ref : 1; // Bits 23:23
+ uint32_t long_zq : 1; // Bits 24:24
+ uint32_t dis_srx_zq : 1; // Bits 25:25
+ uint32_t serialize_zq : 1; // Bits 26:26
+ uint32_t zq_fast_exec : 1; // Bits 27:27
+ uint32_t dis_drive_nop : 1; // Bits 28:28
+ uint32_t pres_wdb_ent : 1; // Bits 29:29
+ uint32_t dis_clk_gate : 1; // Bits 30:30
+ uint32_t : 1; // Bits 31:31
+ };
+ uint32_t raw;
+};
+
+union mcmain_command_rate_limit_reg {
+ struct __packed {
+ uint32_t enable_cmd_limit : 1; // Bits 0:0
+ uint32_t cmd_rate_limit : 3; // Bits 3:1
+ uint32_t reset_on_command : 4; // Bits 7:4
+ uint32_t reset_delay : 4; // Bits 11:8
+ uint32_t ck_to_cke_delay : 2; // Bits 13:12
+ uint32_t : 17; // Bits 30:14
+ uint32_t init_mrw_2n_cs : 1; // Bits 31:31
+ };
+ uint32_t raw;
+};
+
+union mad_chnl_reg {
+ struct __packed {
+ uint32_t ch_a : 2; // Bits 1:0
+ uint32_t ch_b : 2; // Bits 3:2
+ uint32_t ch_c : 2; // Bits 5:4
+ uint32_t stacked_mode : 1; // Bits 6:6
+ uint32_t stkd_mode_bits : 3; // Bits 9:7
+ uint32_t lpddr_mode : 1; // Bits 10:10
+ uint32_t : 21; // Bits 31:11
+ };
+ uint32_t raw;
+};
+
+union mad_dimm_reg {
+ struct __packed {
+ uint32_t dimm_a_size : 8; // Bits 7:0
+ uint32_t dimm_b_size : 8; // Bits 15:8
+ uint32_t dimm_a_sel : 1; // Bits 16:16
+ uint32_t dimm_a_ranks : 1; // Bits 17:17
+ uint32_t dimm_b_ranks : 1; // Bits 18:18
+ uint32_t dimm_a_width : 1; // Bits 19:19
+ uint32_t dimm_b_width : 1; // Bits 20:20
+ uint32_t rank_interleave : 1; // Bits 21:21
+ uint32_t enh_interleave : 1; // Bits 22:22
+ uint32_t : 1; // Bits 23:23
+ uint32_t ecc_mode : 2; // Bits 25:24
+ uint32_t hori_mode : 1; // Bits 26:26
+ uint32_t hori_address : 3; // Bits 29:27
  1402. + uint32_t : 2; // Bits 31:30
  1403. + };
  1404. + uint32_t raw;
  1405. +};
  1406. +
  1407. +union mad_zr_reg {
  1408. + struct __packed {
  1409. + uint32_t : 16; // Bits 15:0
  1410. + uint32_t ch_b_double : 8; // Bits 23:16
  1411. + uint32_t ch_b_single : 8; // Bits 31:24
  1412. + };
  1413. + uint32_t raw;
  1414. +};
  1415. +
  1416. +/* Same definition for P_COMP, M_COMP, D_COMP */
  1417. +union pcu_comp_reg {
  1418. + struct __packed {
  1419. + uint32_t comp_disable : 1; // Bits 0:0
  1420. + uint32_t comp_interval : 4; // Bits 4:1
  1421. + uint32_t : 3; // Bits 7:5
  1422. + uint32_t comp_force : 1; // Bits 8:8
  1423. + uint32_t : 23; // Bits 31:9
  1424. + };
  1425. + uint32_t raw;
  1426. +};
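+
+/*
+ * The unions above are meant to be accessed through their 'raw' member.
+ * A minimal read-modify-write sketch, assuming the mchbar_read32/mchbar_write32
+ * accessors used elsewhere in this driver and the M_COMP offset from
+ * registers/mchbar.h:
+ *
+ *	union pcu_comp_reg m_comp = { .raw = mchbar_read32(M_COMP) };
+ *	m_comp.comp_force = 1;
+ *	mchbar_write32(M_COMP, m_comp.raw);
+ */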
+
+#endif
diff --git a/src/northbridge/intel/haswell/native_raminit/timings_refresh.c b/src/northbridge/intel/haswell/native_raminit/timings_refresh.c
new file mode 100644
index 0000000000..a9d960f31b
--- /dev/null
+++ b/src/northbridge/intel/haswell/native_raminit/timings_refresh.c
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "raminit_native.h"
+
+void configure_timings(struct sysinfo *ctrl)
+{
+	/** TODO: Stub **/
+}
+
+void configure_refresh(struct sysinfo *ctrl)
+{
+	/** TODO: Stub **/
+}
diff --git a/src/northbridge/intel/haswell/registers/mchbar.h b/src/northbridge/intel/haswell/registers/mchbar.h
index 45f8174995..4c3f399b5d 100644
--- a/src/northbridge/intel/haswell/registers/mchbar.h
+++ b/src/northbridge/intel/haswell/registers/mchbar.h
@@ -7,9 +7,98 @@
 #define NUM_CHANNELS	2
 #define NUM_SLOTS	2
+/* Indexed register helper macros */
+#define _DDRIO_C_R_B(r, ch, rank, byte)	((r) + 0x100 * (ch) + 0x4 * (rank) + 0x200 * (byte))
+#define _MCMAIN_C_X(r, ch, x)	((r) + 0x400 * (ch) + 0x4 * (x))
+#define _MCMAIN_C(r, ch)	((r) + 0x400 * (ch))
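+/*
+ * Example expansions of the helpers above, for illustration only
+ * (the argument values are arbitrary):
+ *	_DDRIO_C_R_B(0x0064, 1, 0, 3) = 0x0064 + 0x100 + 0x000 + 0x600 = 0x0764
+ *	_MCMAIN_C(0x4010, 1)          = 0x4010 + 0x400                 = 0x4410
+ */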
+
 /* Register definitions */
+
+/* DDR DATA per-channel per-bytelane */
+#define DQ_CONTROL_2(ch, byte)	_DDRIO_C_R_B(0x0064, ch, 0, byte)
+
+/* DDR CKE per-channel */
+#define DDR_CKE_ch_CMD_COMP_OFFSET(ch)	_DDRIO_C_R_B(0x1204, ch, 0, 0)
+#define DDR_CKE_ch_CMD_PI_CODING(ch)	_DDRIO_C_R_B(0x1208, ch, 0, 0)
+
+#define DDR_CKE_ch_CTL_CONTROLS(ch)	_DDRIO_C_R_B(0x121c, ch, 0, 0)
+#define DDR_CKE_ch_CTL_RANKS_USED(ch)	_DDRIO_C_R_B(0x1220, ch, 0, 0)
+
+/* DDR CTL per-channel */
+#define DDR_CTL_ch_CTL_CONTROLS(ch)	_DDRIO_C_R_B(0x1c1c, ch, 0, 0)
+#define DDR_CTL_ch_CTL_RANKS_USED(ch)	_DDRIO_C_R_B(0x1c20, ch, 0, 0)
+
+/* DDR CLK per-channel */
+#define DDR_CLK_ch_RANKS_USED(ch)	_DDRIO_C_R_B(0x1800, ch, 0, 0)
+#define DDR_CLK_ch_COMP_OFFSET(ch)	_DDRIO_C_R_B(0x1808, ch, 0, 0)
+#define DDR_CLK_ch_PI_CODING(ch)	_DDRIO_C_R_B(0x180c, ch, 0, 0)
+#define DDR_CLK_ch_CONTROLS(ch)	_DDRIO_C_R_B(0x1810, ch, 0, 0)
+
+/* DDR Scrambler */
+#define DDR_SCRAMBLE_ch(ch)	(0x2000 + 4 * (ch))
+#define DDR_SCRAM_MISC_CONTROL	0x2008
+
+/* DDR CMDN/CMDS per-channel (writes go to both CMDN and CMDS fubs) */
+#define DDR_CMD_ch_COMP_OFFSET(ch)	_DDRIO_C_R_B(0x3204, ch, 0, 0)
+#define DDR_CMD_ch_PI_CODING(ch)	_DDRIO_C_R_B(0x3208, ch, 0, 0)
+#define DDR_CMD_ch_CONTROLS(ch)	_DDRIO_C_R_B(0x320c, ch, 0, 0)
+
+/* DDR CKE/CTL per-channel (writes go to both CKE and CTL fubs) */
+#define DDR_CKE_CTL_ch_CTL_COMP_OFFSET(ch)	_DDRIO_C_R_B(0x3414, ch, 0, 0)
+#define DDR_CKE_CTL_ch_CTL_PI_CODING(ch)	_DDRIO_C_R_B(0x3418, ch, 0, 0)
+
+/* DDR DATA broadcast */
+#define DDR_DATA_RX_TRAIN_RANK(rank)	_DDRIO_C_R_B(0x3600, 0, rank, 0)
+#define DDR_DATA_RX_PER_BIT_RANK(rank)	_DDRIO_C_R_B(0x3610, 0, rank, 0)
+#define DDR_DATA_TX_TRAIN_RANK(rank)	_DDRIO_C_R_B(0x3620, 0, rank, 0)
+#define DDR_DATA_TX_PER_BIT_RANK(rank)	_DDRIO_C_R_B(0x3630, 0, rank, 0)
+
+#define DDR_DATA_RCOMP_DATA_1	0x3644
+#define DDR_DATA_TX_XTALK	0x3648
+#define DDR_DATA_RX_OFFSET_VDQ	0x364c
+#define DDR_DATA_OFFSET_COMP	0x365c
+#define DDR_DATA_CONTROL_1	0x3660
+
+#define DDR_DATA_OFFSET_TRAIN	0x3670
+#define DDR_DATA_CONTROL_0	0x3674
+#define DDR_DATA_VREF_ADJUST	0x3678
+
+/* DDR CMD broadcast */
+#define DDR_CMD_COMP	0x3700
+
+/* DDR CKE/CTL broadcast */
+#define DDR_CKE_CTL_COMP	0x3810
+
+/* DDR CLK broadcast */
+#define DDR_CLK_COMP	0x3904
+#define DDR_CLK_CONTROLS	0x3910
+#define DDR_CLK_CB_STATUS	0x3918
+
+/* DDR COMP (global) */
+#define DDR_COMP_DATA_COMP_1	0x3a04
+#define DDR_COMP_CMD_COMP	0x3a08
+#define DDR_COMP_CTL_COMP	0x3a0c
+#define DDR_COMP_CLK_COMP	0x3a10
+#define DDR_COMP_CTL_0	0x3a14
+#define DDR_COMP_CTL_1	0x3a18
+#define DDR_COMP_VSSHI	0x3a1c
+#define DDR_COMP_OVERRIDE	0x3a20
+#define DDR_COMP_VSSHI_CONTROL	0x3a24
+
+/* MCMAIN per-channel */
+#define COMMAND_RATE_LIMIT_ch(ch)	_MCMAIN_C(0x4010, ch)
+
+#define MC_INIT_STATE_ch(ch)	_MCMAIN_C(0x42a0, ch)
+
+/* MCMAIN broadcast */
+#define MCSCHEDS_CBIT	0x4c20
+
+#define MCMNTS_SC_WDBWM	0x4f8c
+
+/* MCDECS */
 #define MAD_CHNL	0x5000 /* Address Decoder Channel Configuration */
 #define MAD_DIMM(ch)	(0x5004 + (ch) * 4)
+#define MAD_ZR	0x5014
 #define MC_INIT_STATE_G	0x5030
 #define MRC_REVISION	0x5034 /* MRC Revision */
@@ -28,6 +117,8 @@
 #define PCU_DDR_PTM_CTL	0x5880
+#define PCU_DDR_VOLTAGE	0x58a4
+
 /* Some power MSRs are also represented in MCHBAR */
 #define MCH_PKG_POWER_LIMIT_LO	0x59a0
 #define MCH_PKG_POWER_LIMIT_HI	0x59a4
@@ -48,6 +139,8 @@
 #define MAILBOX_BIOS_CMD_FSM_MEASURE_INTVL	0x909
 #define MAILBOX_BIOS_CMD_READ_PCH_POWER	0xa
 #define MAILBOX_BIOS_CMD_READ_PCH_POWER_EXT	0xb
+#define MAILBOX_BIOS_CMD_READ_DDR_2X_REFRESH	0x17
+#define MAILBOX_BIOS_CMD_WRITE_DDR_2X_REFRESH	0x18
 #define MAILBOX_BIOS_CMD_READ_C9C10_VOLTAGE	0x26
 #define MAILBOX_BIOS_CMD_WRITE_C9C10_VOLTAGE	0x27
@@ -66,6 +159,7 @@
 #define MC_BIOS_REQ	0x5e00 /* Memory frequency request register */
 #define MC_BIOS_DATA	0x5e04 /* Miscellaneous information for BIOS */
 #define SAPMCTL	0x5f00
+#define M_COMP	0x5f08
 #define HDAUDRID	0x6008
 #define UMAGFXCTL	0x6020
--
2.39.2