// SPDX-License-Identifier: Intel
/*
 * Copyright (C) 2013, Intel Corporation
 * Copyright (C) 2015, Bin Meng <bmeng.cn@gmail.com>
 *
 * Ported from Intel released Quark UEFI BIOS
 * QuarkSocPkg/QuarkNorthCluster/MemoryInit/Pei
 */
12 #include <asm/arch/device.h>
13 #include <asm/arch/mrc.h>
14 #include <asm/arch/msg_port.h>
19 /* t_ck clock period in picoseconds per speed index 800, 1066, 1333 */
20 static const uint32_t t_ck[3] = {
26 /* Global variables */
27 static const uint16_t ddr_wclk[] = {193, 158};
29 static const uint16_t ddr_wctl[] = {1, 217};
32 static const uint16_t ddr_wcmd[] = {1, 220};
36 static const uint16_t ddr_rcvn[] = {129, 498};
40 static const uint16_t ddr_wdqs[] = {65, 289};
44 static const uint8_t ddr_rdqs[] = {32, 24};
48 static const uint16_t ddr_wdq[] = {32, 257};
51 /* Stop self refresh driven by MCU */
52 void clear_self_refresh(struct mrc_params *mrc_params)
56 /* clear the PMSTS Channel Self Refresh bits */
57 mrc_write_mask(MEM_CTLR, PMSTS, PMSTS_DISR, PMSTS_DISR);
/*
 * prog_ddr_timing_control() - program the MCU timing registers DTR0..DTR4
 *
 * Reads DTR0-DTR4, derives DRAM timings (tRP/tRCD/tRAS/tWR/tWTR/tRRD/tRTP/
 * tFAW) from t_ck[] and mrc_params->params using the MCEIL() picosecond-to-
 * clock rounding macro, rewrites the relevant bit fields, and writes the
 * registers back.
 *
 * NOTE(review): this span of the listing is corrupted. Each surviving line
 * still carries its original line number as a stray prefix, and interior
 * lines were dropped by the extraction: the declarations of tck/tcl/wl and
 * tmp1/tmp2 (used in the DTR4 writes below), the DTR1 TWCL/TCMD value
 * writes, several DTR2/DTR3/DTR4 field values, the DDRFREQ_1333 else
 * branches, and the function braces. Lines are kept byte-identical here;
 * recover the complete function from the upstream file before building.
 */
62 /* It will initialize timing registers in the MCU (DTR0..DTR4) */
63 void prog_ddr_timing_control(struct mrc_params *mrc_params)
66 uint8_t trp, trcd, tras, twr, twtr, trrd, trtp, tfaw;
68 u32 dtr0, dtr1, dtr2, dtr3, dtr4;
/* POST code 0x02/0x00 marks the start of MCU init */
74 mrc_post_code(0x02, 0x00);
76 dtr0 = msg_port_read(MEM_CTLR, DTR0);
77 dtr1 = msg_port_read(MEM_CTLR, DTR1);
78 dtr2 = msg_port_read(MEM_CTLR, DTR2);
79 dtr3 = msg_port_read(MEM_CTLR, DTR3);
80 dtr4 = msg_port_read(MEM_CTLR, DTR4);
82 tck = t_ck[mrc_params->ddr_speed]; /* Clock in picoseconds */
83 tcl = mrc_params->params.cl; /* CAS latency in clocks */
84 trp = tcl; /* Per CAT MRC */
85 trcd = tcl; /* Per CAT MRC */
86 tras = MCEIL(mrc_params->params.ras, tck);
88 /* Per JEDEC: tWR=15000ps DDR2/3 from 800-1600 */
89 twr = MCEIL(15000, tck);
91 twtr = MCEIL(mrc_params->params.wtr, tck);
92 trrd = MCEIL(mrc_params->params.rrd, tck);
93 trtp = 4; /* Valid for 800 and 1066, use 5 for 1333 */
94 tfaw = MCEIL(mrc_params->params.faw, tck);
/* write latency: 5 clocks at 800, +1 per speed grade */
96 wl = 5 + mrc_params->ddr_speed;
98 dtr0 &= ~DTR0_DFREQ_MASK;
99 dtr0 |= mrc_params->ddr_speed;
100 dtr0 &= ~DTR0_TCL_MASK;
102 dtr0 |= ((tcl - 5) << 12);
103 dtr0 &= ~DTR0_TRP_MASK;
104 dtr0 |= ((trp - 5) << 4); /* 5 bit DRAM Clock */
105 dtr0 &= ~DTR0_TRCD_MASK;
106 dtr0 |= ((trcd - 5) << 8); /* 5 bit DRAM Clock */
/* NOTE(review): the TWCL value write after this mask clear is missing */
108 dtr1 &= ~DTR1_TWCL_MASK;
111 dtr1 &= ~DTR1_TWTP_MASK;
112 dtr1 |= ((wl + 4 + twr - 14) << 8); /* Change to tWTP */
113 dtr1 &= ~DTR1_TRTP_MASK;
114 dtr1 |= ((MMAX(trtp, 4) - 3) << 28); /* 4 bit DRAM Clock */
115 dtr1 &= ~DTR1_TRRD_MASK;
116 dtr1 |= ((trrd - 4) << 24); /* 4 bit DRAM Clock */
/* NOTE(review): the TCMD value write after this mask clear is missing */
117 dtr1 &= ~DTR1_TCMD_MASK;
119 dtr1 &= ~DTR1_TRAS_MASK;
120 dtr1 |= ((tras - 14) << 20); /* 6 bit DRAM Clock */
121 dtr1 &= ~DTR1_TFAW_MASK;
122 dtr1 |= ((((tfaw + 1) >> 1) - 5) << 16);/* 4 bit DRAM Clock */
123 /* Set 4 Clock CAS to CAS delay (multi-burst) */
124 dtr1 &= ~DTR1_TCCD_MASK;
/* NOTE(review): DTR2/DTR3 field value writes between these clears are missing */
126 dtr2 &= ~DTR2_TRRDR_MASK;
128 dtr2 &= ~DTR2_TWWDR_MASK;
130 dtr2 &= ~DTR2_TRWDR_MASK;
133 dtr3 &= ~DTR3_TWRDR_MASK;
135 dtr3 &= ~DTR3_TXXXX_MASK;
138 dtr3 &= ~DTR3_TRWSR_MASK;
139 if (mrc_params->ddr_speed == DDRFREQ_800) {
140 /* Extended RW delay (+1) */
141 dtr3 |= ((tcl - 5 + 1) << 8);
142 } else if (mrc_params->ddr_speed == DDRFREQ_1066) {
143 /* Extended RW delay (+1) */
144 dtr3 |= ((tcl - 5 + 1) << 8);
/* NOTE(review): the closing brace / DDRFREQ_1333 branch is missing here */
147 dtr3 &= ~DTR3_TWRSR_MASK;
148 dtr3 |= ((4 + wl + twtr - 11) << 13);
150 dtr3 &= ~DTR3_TXP_MASK;
151 if (mrc_params->ddr_speed == DDRFREQ_800)
152 dtr3 |= ((MMAX(0, 1 - 1)) << 22);
154 dtr3 |= ((MMAX(0, 2 - 1)) << 22);
/* NOTE(review): WRODTSTRT/WRODTSTOP value writes are missing below */
156 dtr4 &= ~DTR4_WRODTSTRT_MASK;
158 dtr4 &= ~DTR4_WRODTSTOP_MASK;
160 dtr4 &= ~DTR4_XXXX1_MASK;
/* tmp1/tmp2 are assigned on lines lost from this listing — recover upstream */
161 dtr4 |= ((1 + tmp1 - tmp2 + 2) << 8);
162 dtr4 &= ~DTR4_XXXX2_MASK;
163 dtr4 |= ((1 + tmp1 - tmp2 + 2) << 12);
164 dtr4 &= ~(DTR4_ODTDIS | DTR4_TRGSTRDIS);
/* commit all five timing registers */
166 msg_port_write(MEM_CTLR, DTR0, dtr0);
167 msg_port_write(MEM_CTLR, DTR1, dtr1);
168 msg_port_write(MEM_CTLR, DTR2, dtr2);
169 msg_port_write(MEM_CTLR, DTR3, dtr3);
170 msg_port_write(MEM_CTLR, DTR4, dtr4);
175 /* Configure MCU before jedec init sequence */
176 void prog_decode_before_jedec(struct mrc_params *mrc_params)
186 /* Disable power saving features */
187 dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
188 dpmc0 |= (DPMC0_CLKGTDIS | DPMC0_DISPWRDN);
189 dpmc0 &= ~DPMC0_PCLSTO_MASK;
190 dpmc0 &= ~DPMC0_DYNSREN;
191 msg_port_write(MEM_CTLR, DPMC0, dpmc0);
193 /* Disable out of order transactions */
194 dsch = msg_port_read(MEM_CTLR, DSCH);
195 dsch |= (DSCH_OOODIS | DSCH_NEWBYPDIS);
196 msg_port_write(MEM_CTLR, DSCH, dsch);
198 /* Disable issuing the REF command */
199 drfc = msg_port_read(MEM_CTLR, DRFC);
200 drfc &= ~DRFC_TREFI_MASK;
201 msg_port_write(MEM_CTLR, DRFC, drfc);
203 /* Disable ZQ calibration short */
204 dcal = msg_port_read(MEM_CTLR, DCAL);
205 dcal &= ~DCAL_ZQCINT_MASK;
206 dcal &= ~DCAL_SRXZQCL_MASK;
207 msg_port_write(MEM_CTLR, DCAL, dcal);
210 * Training performed in address mode 0, rank population has limited
211 * impact, however simulator complains if enabled non-existing rank.
214 if (mrc_params->rank_enables & 1)
216 if (mrc_params->rank_enables & 2)
218 msg_port_write(MEM_CTLR, DRP, drp);
224 * After Cold Reset, BIOS should set COLDWAKE bit to 1 before
225 * sending the WAKE message to the Dunit.
227 * For Standby Exit, or any other mode in which the DRAM is in
228 * SR, this bit must be set to 0.
230 void perform_ddr_reset(struct mrc_params *mrc_params)
234 /* Set COLDWAKE bit before sending the WAKE message */
235 mrc_write_mask(MEM_CTLR, DRMC, DRMC_COLDWAKE, DRMC_COLDWAKE);
237 /* Send wake command to DUNIT (MUST be done before JEDEC) */
240 /* Set default value */
241 msg_port_write(MEM_CTLR, DRMC,
242 mrc_params->rd_odt_value == 0 ? DRMC_ODTMODE : 0);
/*
 * ddrphy_init() - one-time initialization of the DDRIO (DDR PHY) unit
 *
 * Sequence visible in this listing: deassert init-complete/IOBUFACT/
 * WRPTRENABLE per channel, hold the PHY in reset (MASTERRSTN=0), program
 * the DQ/CMD/CLK-CTL/COMP module registers per enabled channel, program
 * static delays from the ddr_* tables, enable the analog dither bits,
 * release reset, then enable VREGs, MDLLs, TX/RX paths, run host-to-memory
 * clock alignment, kick the initial COMP cycle and finally set
 * SPID_INIT_COMPLETE per channel.
 *
 * NOTE(review): this span of the listing is heavily corrupted. Each
 * surviving line still carries its original line number as a stray prefix,
 * and the extraction dropped many interior lines: several local variable
 * declarations (cas, cwl, temp), most 'for' loop headers and closing
 * braces, the destination register names of many mrc_alt_write_mask()
 * calls, the switch-case labels for rd_odt_value, and the final value/mask
 * arguments of numerous writes. The lines are kept byte-identical below;
 * this function must be recovered from the upstream file before it can be
 * built or reviewed for behavior.
 */
249 * This function performs some initialization on the DDRIO unit.
250 * This function is dependent on BOARD_ID, DDR_SPEED, and CHANNEL_ENABLES.
252 void ddrphy_init(struct mrc_params *mrc_params)
255 uint8_t ch; /* channel counter */
256 uint8_t rk; /* rank counter */
257 uint8_t bl_grp; /* byte lane group counter (2 BLs per module) */
258 uint8_t bl_divisor = 1; /* byte lane divisor */
259 /* For DDR3 --> 0 == 800, 1 == 1066, 2 == 1333 */
260 uint8_t speed = mrc_params->ddr_speed & 3;
266 cas = mrc_params->params.cl;
267 cwl = 5 + mrc_params->ddr_speed;
269 /* ddrphy_init starts */
270 mrc_post_code(0x03, 0x00);
274 * Make sure IOBUFACT is deasserted before initializing the DDR PHY
277 * Make sure WRPTRENABLE is deasserted before initializing the DDR PHY
279 for (ch = 0; ch < NUM_CHANNELS; ch++) {
280 if (mrc_params->channel_enables & (1 << ch)) {
281 /* Deassert DDRPHY Initialization Complete */
282 mrc_alt_write_mask(DDRPHY,
283 CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
284 ~(1 << 20), 1 << 20); /* SPID_INIT_COMPLETE=0 */
285 /* Deassert IOBUFACT */
286 mrc_alt_write_mask(DDRPHY,
287 CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
288 ~(1 << 2), 1 << 2); /* IOBUFACTRST_N=0 */
290 mrc_alt_write_mask(DDRPHY,
291 CMDPTRREG + ch * DDRIOCCC_CH_OFFSET,
292 ~(1 << 0), 1 << 0); /* WRPTRENABLE=0 */
296 /* Put PHY in reset */
297 mrc_alt_write_mask(DDRPHY, MASTERRSTN, 0, 1);
299 /* Initialize DQ01, DQ23, CMD, CLK-CTL, COMP modules */
302 mrc_post_code(0x03, 0x10);
303 for (ch = 0; ch < NUM_CHANNELS; ch++) {
304 if (mrc_params->channel_enables & (1 << ch)) {
307 bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
309 /* Analog MUX select - IO2xCLKSEL */
310 mrc_alt_write_mask(DDRPHY,
312 bl_grp * DDRIODQ_BL_OFFSET +
313 ch * DDRIODQ_CH_OFFSET,
314 bl_grp ? 0 : (1 << 22), 1 << 22);
317 switch (mrc_params->rd_odt_value) {
333 mrc_alt_write_mask(DDRPHY,
335 bl_grp * DDRIODQ_BL_OFFSET +
336 ch * DDRIODQ_CH_OFFSET,
339 mrc_alt_write_mask(DDRPHY,
341 bl_grp * DDRIODQ_BL_OFFSET +
342 ch * DDRIODQ_CH_OFFSET,
345 /* Dynamic ODT/DIFFAMP */
346 temp = (cas << 24) | (cas << 16) |
347 (cas << 8) | (cas << 0);
363 /* Launch Time: ODT, DIFFAMP, ODT, DIFFAMP */
364 mrc_alt_write_mask(DDRPHY,
366 bl_grp * DDRIODQ_BL_OFFSET +
367 ch * DDRIODQ_CH_OFFSET,
372 temp = (0x06 << 16) | (0x07 << 8);
375 temp = (0x07 << 16) | (0x08 << 8);
378 temp = (0x09 << 16) | (0x0a << 8);
381 temp = (0x0a << 16) | (0x0b << 8);
385 /* On Duration: ODT, DIFFAMP */
386 mrc_alt_write_mask(DDRPHY,
388 bl_grp * DDRIODQ_BL_OFFSET +
389 ch * DDRIODQ_CH_OFFSET,
391 /* On Duration: ODT, DIFFAMP */
392 mrc_alt_write_mask(DDRPHY,
394 bl_grp * DDRIODQ_BL_OFFSET +
395 ch * DDRIODQ_CH_OFFSET,
398 switch (mrc_params->rd_odt_value) {
400 /* override DIFFAMP=on, ODT=off */
401 temp = (0x3f << 16) | (0x3f << 10);
404 /* override DIFFAMP=on, ODT=on */
405 temp = (0x3f << 16) | (0x2a << 10);
409 /* Override: DIFFAMP, ODT */
410 mrc_alt_write_mask(DDRPHY,
412 bl_grp * DDRIODQ_BL_OFFSET +
413 ch * DDRIODQ_CH_OFFSET,
415 /* Override: DIFFAMP, ODT */
416 mrc_alt_write_mask(DDRPHY,
418 bl_grp * DDRIODQ_BL_OFFSET +
419 ch * DDRIODQ_CH_OFFSET,
424 /* 1xCLK Domain Timings: tEDP,RCVEN,WDQS (PO) */
425 mrc_alt_write_mask(DDRPHY,
427 bl_grp * DDRIODQ_BL_OFFSET +
428 ch * DDRIODQ_CH_OFFSET,
429 ((cas + 7) << 16) | ((cas - 4) << 8) |
430 ((cwl - 2) << 0), 0x003f1f1f);
431 mrc_alt_write_mask(DDRPHY,
433 bl_grp * DDRIODQ_BL_OFFSET +
434 ch * DDRIODQ_CH_OFFSET,
435 ((cas + 7) << 16) | ((cas - 4) << 8) |
436 ((cwl - 2) << 0), 0x003f1f1f);
438 /* RCVEN Bypass (PO) */
439 mrc_alt_write_mask(DDRPHY,
441 bl_grp * DDRIODQ_BL_OFFSET +
442 ch * DDRIODQ_CH_OFFSET,
444 mrc_alt_write_mask(DDRPHY,
446 bl_grp * DDRIODQ_BL_OFFSET +
447 ch * DDRIODQ_CH_OFFSET,
451 mrc_alt_write_mask(DDRPHY,
453 bl_grp * DDRIODQ_BL_OFFSET +
454 ch * DDRIODQ_CH_OFFSET,
456 mrc_alt_write_mask(DDRPHY,
458 bl_grp * DDRIODQ_BL_OFFSET +
459 ch * DDRIODQ_CH_OFFSET,
463 /* Internal Vref Code, Enable#, Ext_or_Int (1=Ext) */
464 mrc_alt_write_mask(DDRPHY,
466 bl_grp * DDRIODQ_BL_OFFSET +
467 ch * DDRIODQ_CH_OFFSET,
468 (0x03 << 2) | (0x0 << 1) | (0x0 << 0),
470 /* Internal Vref Code, Enable#, Ext_or_Int (1=Ext) */
471 mrc_alt_write_mask(DDRPHY,
473 bl_grp * DDRIODQ_BL_OFFSET +
474 ch * DDRIODQ_CH_OFFSET,
475 (0x03 << 2) | (0x0 << 1) | (0x0 << 0),
477 /* Per-Bit De-Skew Enable */
478 mrc_alt_write_mask(DDRPHY,
480 bl_grp * DDRIODQ_BL_OFFSET +
481 ch * DDRIODQ_CH_OFFSET,
483 /* Per-Bit De-Skew Enable */
484 mrc_alt_write_mask(DDRPHY,
486 bl_grp * DDRIODQ_BL_OFFSET +
487 ch * DDRIODQ_CH_OFFSET,
492 mrc_alt_write_mask(DDRPHY,
493 CMDOBSCKEBBCTL + ch * DDRIOCCC_CH_OFFSET,
496 /* Enable tristate control of cmd/address bus */
497 mrc_alt_write_mask(DDRPHY,
498 CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
502 mrc_alt_write_mask(DDRPHY,
503 CMDRCOMPODT + ch * DDRIOCCC_CH_OFFSET,
504 (0x03 << 5) | (0x03 << 0), 0x3ff);
506 /* CMDPM* registers must be programmed in this order */
508 /* Turn On Delays: SFR (regulator), MPLL */
509 mrc_alt_write_mask(DDRPHY,
510 CMDPMDLYREG4 + ch * DDRIOCCC_CH_OFFSET,
511 0xffffffff, 0xffffffff);
513 * Delays: ASSERT_IOBUFACT_to_ALLON0_for_PM_MSG_3,
514 * VREG (MDLL) Turn On, ALLON0_to_DEASSERT_IOBUFACT
515 * for_PM_MSG_gt0, MDLL Turn On
517 mrc_alt_write_mask(DDRPHY,
518 CMDPMDLYREG3 + ch * DDRIOCCC_CH_OFFSET,
519 0xfffff616, 0xffffffff);
520 /* MPLL Divider Reset Delays */
521 mrc_alt_write_mask(DDRPHY,
522 CMDPMDLYREG2 + ch * DDRIOCCC_CH_OFFSET,
523 0xffffffff, 0xffffffff);
524 /* Turn Off Delays: VREG, Staggered MDLL, MDLL, PI */
525 mrc_alt_write_mask(DDRPHY,
526 CMDPMDLYREG1 + ch * DDRIOCCC_CH_OFFSET,
527 0xffffffff, 0xffffffff);
528 /* Turn On Delays: MPLL, Staggered MDLL, PI, IOBUFACT */
529 mrc_alt_write_mask(DDRPHY,
530 CMDPMDLYREG0 + ch * DDRIOCCC_CH_OFFSET,
531 0xffffffff, 0xffffffff);
532 /* Allow PUnit signals */
533 mrc_alt_write_mask(DDRPHY,
534 CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
535 (0x6 << 8) | (0x1 << 6) | (0x4 << 0),
537 /* DLL_VREG Bias Trim, VREF Tuning for DLL_VREG */
538 mrc_alt_write_mask(DDRPHY,
539 CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
540 (0x3 << 4) | (0x7 << 0), 0x7f);
543 mrc_alt_write_mask(DDRPHY,
544 CCOBSCKEBBCTL + ch * DDRIOCCC_CH_OFFSET,
545 0, 1 << 24); /* CLKEBB */
546 /* Buffer Enable: CS,CKE,ODT,CLK */
547 mrc_alt_write_mask(DDRPHY,
548 CCCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
551 mrc_alt_write_mask(DDRPHY,
552 CCRCOMPODT + ch * DDRIOCCC_CH_OFFSET,
553 (0x03 << 8) | (0x03 << 0), 0x00001f1f);
554 /* DLL_VREG Bias Trim, VREF Tuning for DLL_VREG */
555 mrc_alt_write_mask(DDRPHY,
556 CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
557 (0x3 << 4) | (0x7 << 0), 0x7f);
560 * COMP (RON channel specific)
561 * - DQ/DQS/DM RON: 32 Ohm
562 * - CTRL/CMD RON: 27 Ohm
565 /* RCOMP Vref PU/PD */
566 mrc_alt_write_mask(DDRPHY,
567 DQVREFCH0 + ch * DDRCOMP_CH_OFFSET,
568 (0x08 << 24) | (0x03 << 16), 0x3f3f0000);
569 /* RCOMP Vref PU/PD */
570 mrc_alt_write_mask(DDRPHY,
571 CMDVREFCH0 + ch * DDRCOMP_CH_OFFSET,
572 (0x0C << 24) | (0x03 << 16), 0x3f3f0000);
573 /* RCOMP Vref PU/PD */
574 mrc_alt_write_mask(DDRPHY,
575 CLKVREFCH0 + ch * DDRCOMP_CH_OFFSET,
576 (0x0F << 24) | (0x03 << 16), 0x3f3f0000);
577 /* RCOMP Vref PU/PD */
578 mrc_alt_write_mask(DDRPHY,
579 DQSVREFCH0 + ch * DDRCOMP_CH_OFFSET,
580 (0x08 << 24) | (0x03 << 16), 0x3f3f0000);
581 /* RCOMP Vref PU/PD */
582 mrc_alt_write_mask(DDRPHY,
583 CTLVREFCH0 + ch * DDRCOMP_CH_OFFSET,
584 (0x0C << 24) | (0x03 << 16), 0x3f3f0000);
586 /* DQS Swapped Input Enable */
587 mrc_alt_write_mask(DDRPHY,
588 COMPEN1CH0 + ch * DDRCOMP_CH_OFFSET,
589 (1 << 19) | (1 << 17), 0xc00ac000);
591 /* ODT VREF = 1.5 x 274/360+274 = 0.65V (code of ~50) */
593 mrc_alt_write_mask(DDRPHY,
594 DQVREFCH0 + ch * DDRCOMP_CH_OFFSET,
595 (0x32 << 8) | (0x03 << 0), 0x00003f3f);
597 mrc_alt_write_mask(DDRPHY,
598 DQSVREFCH0 + ch * DDRCOMP_CH_OFFSET,
599 (0x32 << 8) | (0x03 << 0), 0x00003f3f);
601 mrc_alt_write_mask(DDRPHY,
602 CLKVREFCH0 + ch * DDRCOMP_CH_OFFSET,
603 (0x0E << 8) | (0x05 << 0), 0x00003f3f);
606 * Slew rate settings are frequency specific,
607 * numbers below are for 800Mhz (speed == 0)
608 * - DQ/DQS/DM/CLK SR: 4V/ns,
609 * - CTRL/CMD SR: 1.5V/ns
611 temp = (0x0e << 16) | (0x0e << 12) | (0x08 << 8) |
612 (0x0b << 4) | (0x0b << 0);
613 /* DCOMP Delay Select: CTL,CMD,CLK,DQS,DQ */
614 mrc_alt_write_mask(DDRPHY,
615 DLYSELCH0 + ch * DDRCOMP_CH_OFFSET,
617 /* TCO Vref CLK,DQS,DQ */
618 mrc_alt_write_mask(DDRPHY,
619 TCOVREFCH0 + ch * DDRCOMP_CH_OFFSET,
620 (0x05 << 16) | (0x05 << 8) | (0x05 << 0),
622 /* ODTCOMP CMD/CTL PU/PD */
623 mrc_alt_write_mask(DDRPHY,
624 CCBUFODTCH0 + ch * DDRCOMP_CH_OFFSET,
625 (0x03 << 8) | (0x03 << 0),
628 mrc_alt_write_mask(DDRPHY,
629 COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
633 /* DQ COMP Overrides */
635 mrc_alt_write_mask(DDRPHY,
636 DQDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
637 (1 << 31) | (0x0a << 16),
640 mrc_alt_write_mask(DDRPHY,
641 DQDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
642 (1 << 31) | (0x0a << 16),
645 mrc_alt_write_mask(DDRPHY,
646 DQDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
647 (1 << 31) | (0x10 << 16),
650 mrc_alt_write_mask(DDRPHY,
651 DQDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
652 (1 << 31) | (0x10 << 16),
655 mrc_alt_write_mask(DDRPHY,
656 DQODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
657 (1 << 31) | (0x0b << 16),
660 mrc_alt_write_mask(DDRPHY,
661 DQODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
662 (1 << 31) | (0x0b << 16),
665 mrc_alt_write_mask(DDRPHY,
666 DQTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
669 mrc_alt_write_mask(DDRPHY,
670 DQTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
673 /* DQS COMP Overrides */
675 mrc_alt_write_mask(DDRPHY,
676 DQSDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
677 (1 << 31) | (0x0a << 16),
680 mrc_alt_write_mask(DDRPHY,
681 DQSDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
682 (1 << 31) | (0x0a << 16),
685 mrc_alt_write_mask(DDRPHY,
686 DQSDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
687 (1 << 31) | (0x10 << 16),
690 mrc_alt_write_mask(DDRPHY,
691 DQSDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
692 (1 << 31) | (0x10 << 16),
695 mrc_alt_write_mask(DDRPHY,
696 DQSODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
697 (1 << 31) | (0x0b << 16),
700 mrc_alt_write_mask(DDRPHY,
701 DQSODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
702 (1 << 31) | (0x0b << 16),
705 mrc_alt_write_mask(DDRPHY,
706 DQSTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
709 mrc_alt_write_mask(DDRPHY,
710 DQSTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
713 /* CLK COMP Overrides */
715 mrc_alt_write_mask(DDRPHY,
716 CLKDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
717 (1 << 31) | (0x0c << 16),
720 mrc_alt_write_mask(DDRPHY,
721 CLKDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
722 (1 << 31) | (0x0c << 16),
725 mrc_alt_write_mask(DDRPHY,
726 CLKDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
727 (1 << 31) | (0x07 << 16),
730 mrc_alt_write_mask(DDRPHY,
731 CLKDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
732 (1 << 31) | (0x07 << 16),
735 mrc_alt_write_mask(DDRPHY,
736 CLKODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
737 (1 << 31) | (0x0b << 16),
740 mrc_alt_write_mask(DDRPHY,
741 CLKODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
742 (1 << 31) | (0x0b << 16),
745 mrc_alt_write_mask(DDRPHY,
746 CLKTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
749 mrc_alt_write_mask(DDRPHY,
750 CLKTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
753 /* CMD COMP Overrides */
755 mrc_alt_write_mask(DDRPHY,
756 CMDDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
757 (1 << 31) | (0x0d << 16),
760 mrc_alt_write_mask(DDRPHY,
761 CMDDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
762 (1 << 31) | (0x0d << 16),
765 mrc_alt_write_mask(DDRPHY,
766 CMDDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
767 (1 << 31) | (0x0a << 16),
770 mrc_alt_write_mask(DDRPHY,
771 CMDDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
772 (1 << 31) | (0x0a << 16),
775 /* CTL COMP Overrides */
777 mrc_alt_write_mask(DDRPHY,
778 CTLDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
779 (1 << 31) | (0x0d << 16),
782 mrc_alt_write_mask(DDRPHY,
783 CTLDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
784 (1 << 31) | (0x0d << 16),
787 mrc_alt_write_mask(DDRPHY,
788 CTLDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
789 (1 << 31) | (0x0a << 16),
792 mrc_alt_write_mask(DDRPHY,
793 CTLDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
794 (1 << 31) | (0x0a << 16),
797 /* DQ TCOCOMP Overrides */
799 mrc_alt_write_mask(DDRPHY,
800 DQTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
801 (1 << 31) | (0x1f << 16),
804 mrc_alt_write_mask(DDRPHY,
805 DQTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
806 (1 << 31) | (0x1f << 16),
809 /* DQS TCOCOMP Overrides */
811 mrc_alt_write_mask(DDRPHY,
812 DQSTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
813 (1 << 31) | (0x1f << 16),
816 mrc_alt_write_mask(DDRPHY,
817 DQSTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
818 (1 << 31) | (0x1f << 16),
821 /* CLK TCOCOMP Overrides */
823 mrc_alt_write_mask(DDRPHY,
824 CLKTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
825 (1 << 31) | (0x1f << 16),
828 mrc_alt_write_mask(DDRPHY,
829 CLKTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
830 (1 << 31) | (0x1f << 16),
834 /* program STATIC delays */
836 set_wcmd(ch, ddr_wcmd[PLATFORM_ID]);
838 set_wcmd(ch, ddr_wclk[PLATFORM_ID] + HALF_CLK);
841 for (rk = 0; rk < NUM_RANKS; rk++) {
842 if (mrc_params->rank_enables & (1 << rk)) {
843 set_wclk(ch, rk, ddr_wclk[PLATFORM_ID]);
845 set_wctl(ch, rk, ddr_wctl[PLATFORM_ID]);
847 set_wctl(ch, rk, ddr_wclk[PLATFORM_ID] + HALF_CLK);
854 /* COMP (non channel specific) */
855 /* RCOMP: Dither PU Enable */
856 mrc_alt_write_mask(DDRPHY, DQANADRVPUCTL, 1 << 30, 1 << 30);
857 /* RCOMP: Dither PD Enable */
858 mrc_alt_write_mask(DDRPHY, DQANADRVPDCTL, 1 << 30, 1 << 30);
859 /* RCOMP: Dither PU Enable */
860 mrc_alt_write_mask(DDRPHY, CMDANADRVPUCTL, 1 << 30, 1 << 30);
861 /* RCOMP: Dither PD Enable */
862 mrc_alt_write_mask(DDRPHY, CMDANADRVPDCTL, 1 << 30, 1 << 30);
863 /* RCOMP: Dither PU Enable */
864 mrc_alt_write_mask(DDRPHY, CLKANADRVPUCTL, 1 << 30, 1 << 30);
865 /* RCOMP: Dither PD Enable */
866 mrc_alt_write_mask(DDRPHY, CLKANADRVPDCTL, 1 << 30, 1 << 30);
867 /* RCOMP: Dither PU Enable */
868 mrc_alt_write_mask(DDRPHY, DQSANADRVPUCTL, 1 << 30, 1 << 30);
869 /* RCOMP: Dither PD Enable */
870 mrc_alt_write_mask(DDRPHY, DQSANADRVPDCTL, 1 << 30, 1 << 30);
871 /* RCOMP: Dither PU Enable */
872 mrc_alt_write_mask(DDRPHY, CTLANADRVPUCTL, 1 << 30, 1 << 30);
873 /* RCOMP: Dither PD Enable */
874 mrc_alt_write_mask(DDRPHY, CTLANADRVPDCTL, 1 << 30, 1 << 30);
875 /* ODT: Dither PU Enable */
876 mrc_alt_write_mask(DDRPHY, DQANAODTPUCTL, 1 << 30, 1 << 30);
877 /* ODT: Dither PD Enable */
878 mrc_alt_write_mask(DDRPHY, DQANAODTPDCTL, 1 << 30, 1 << 30);
879 /* ODT: Dither PU Enable */
880 mrc_alt_write_mask(DDRPHY, CLKANAODTPUCTL, 1 << 30, 1 << 30);
881 /* ODT: Dither PD Enable */
882 mrc_alt_write_mask(DDRPHY, CLKANAODTPDCTL, 1 << 30, 1 << 30);
883 /* ODT: Dither PU Enable */
884 mrc_alt_write_mask(DDRPHY, DQSANAODTPUCTL, 1 << 30, 1 << 30);
885 /* ODT: Dither PD Enable */
886 mrc_alt_write_mask(DDRPHY, DQSANAODTPDCTL, 1 << 30, 1 << 30);
887 /* DCOMP: Dither PU Enable */
888 mrc_alt_write_mask(DDRPHY, DQANADLYPUCTL, 1 << 30, 1 << 30);
889 /* DCOMP: Dither PD Enable */
890 mrc_alt_write_mask(DDRPHY, DQANADLYPDCTL, 1 << 30, 1 << 30);
891 /* DCOMP: Dither PU Enable */
892 mrc_alt_write_mask(DDRPHY, CMDANADLYPUCTL, 1 << 30, 1 << 30);
893 /* DCOMP: Dither PD Enable */
894 mrc_alt_write_mask(DDRPHY, CMDANADLYPDCTL, 1 << 30, 1 << 30);
895 /* DCOMP: Dither PU Enable */
896 mrc_alt_write_mask(DDRPHY, CLKANADLYPUCTL, 1 << 30, 1 << 30);
897 /* DCOMP: Dither PD Enable */
898 mrc_alt_write_mask(DDRPHY, CLKANADLYPDCTL, 1 << 30, 1 << 30);
899 /* DCOMP: Dither PU Enable */
900 mrc_alt_write_mask(DDRPHY, DQSANADLYPUCTL, 1 << 30, 1 << 30);
901 /* DCOMP: Dither PD Enable */
902 mrc_alt_write_mask(DDRPHY, DQSANADLYPDCTL, 1 << 30, 1 << 30);
903 /* DCOMP: Dither PU Enable */
904 mrc_alt_write_mask(DDRPHY, CTLANADLYPUCTL, 1 << 30, 1 << 30);
905 /* DCOMP: Dither PD Enable */
906 mrc_alt_write_mask(DDRPHY, CTLANADLYPDCTL, 1 << 30, 1 << 30);
907 /* TCO: Dither PU Enable */
908 mrc_alt_write_mask(DDRPHY, DQANATCOPUCTL, 1 << 30, 1 << 30);
909 /* TCO: Dither PD Enable */
910 mrc_alt_write_mask(DDRPHY, DQANATCOPDCTL, 1 << 30, 1 << 30);
911 /* TCO: Dither PU Enable */
912 mrc_alt_write_mask(DDRPHY, CLKANATCOPUCTL, 1 << 30, 1 << 30);
913 /* TCO: Dither PD Enable */
914 mrc_alt_write_mask(DDRPHY, CLKANATCOPDCTL, 1 << 30, 1 << 30);
915 /* TCO: Dither PU Enable */
916 mrc_alt_write_mask(DDRPHY, DQSANATCOPUCTL, 1 << 30, 1 << 30);
917 /* TCO: Dither PD Enable */
918 mrc_alt_write_mask(DDRPHY, DQSANATCOPDCTL, 1 << 30, 1 << 30);
919 /* TCOCOMP: Pulse Count */
920 mrc_alt_write_mask(DDRPHY, TCOCNTCTRL, 1, 3);
921 /* ODT: CMD/CTL PD/PU */
922 mrc_alt_write_mask(DDRPHY, CHNLBUFSTATIC,
923 (0x03 << 24) | (0x03 << 16), 0x1f1f0000);
924 /* Set 1us counter */
925 mrc_alt_write_mask(DDRPHY, MSCNTR, 0x64, 0xff);
926 mrc_alt_write_mask(DDRPHY, LATCH1CTL, 0x1 << 28, 0x70000000);
928 /* Release PHY from reset */
929 mrc_alt_write_mask(DDRPHY, MASTERRSTN, 1, 1);
932 mrc_post_code(0x03, 0x11);
934 for (ch = 0; ch < NUM_CHANNELS; ch++) {
935 if (mrc_params->channel_enables & (1 << ch)) {
938 bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
940 mrc_alt_write_mask(DDRPHY,
942 bl_grp * DDRIODQ_BL_OFFSET +
943 ch * DDRIODQ_CH_OFFSET,
945 1 << 13); /* Enable VREG */
950 mrc_alt_write_mask(DDRPHY, ECCMDLLCTL,
951 1 << 13, 1 << 13); /* Enable VREG */
954 mrc_alt_write_mask(DDRPHY,
955 CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
956 1 << 13, 1 << 13); /* Enable VREG */
959 mrc_alt_write_mask(DDRPHY,
960 CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
961 1 << 13, 1 << 13); /* Enable VREG */
967 mrc_post_code(0x03, 0x12);
970 for (ch = 0; ch < NUM_CHANNELS; ch++) {
971 if (mrc_params->channel_enables & (1 << ch)) {
974 bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
976 mrc_alt_write_mask(DDRPHY,
978 bl_grp * DDRIODQ_BL_OFFSET +
979 ch * DDRIODQ_CH_OFFSET,
981 1 << 17); /* Enable MCDLL */
986 mrc_alt_write_mask(DDRPHY, ECCMDLLCTL,
987 1 << 17, 1 << 17); /* Enable MCDLL */
990 mrc_alt_write_mask(DDRPHY,
991 CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
992 1 << 18, 1 << 18); /* Enable MCDLL */
995 mrc_alt_write_mask(DDRPHY,
996 CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
997 1 << 18, 1 << 18); /* Enable MCDLL */
1003 mrc_post_code(0x03, 0x13);
1006 for (ch = 0; ch < NUM_CHANNELS; ch++) {
1007 if (mrc_params->channel_enables & (1 << ch)) {
1010 bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
1012 #ifdef FORCE_16BIT_DDRIO
1014 (mrc_params->channel_width == X16)) ?
1020 mrc_alt_write_mask(DDRPHY,
1022 bl_grp * DDRIODQ_BL_OFFSET +
1023 ch * DDRIODQ_CH_OFFSET,
1027 mrc_alt_write_mask(DDRPHY,
1029 bl_grp * DDRIODQ_BL_OFFSET +
1030 ch * DDRIODQ_CH_OFFSET,
1033 /* Enable RXDLL Overrides BL0 */
1034 mrc_alt_write_mask(DDRPHY,
1036 bl_grp * DDRIODQ_BL_OFFSET +
1037 ch * DDRIODQ_CH_OFFSET,
1043 mrc_alt_write_mask(DDRPHY, ECCDLLTXCTL,
1048 mrc_alt_write_mask(DDRPHY,
1049 CMDDLLTXCTL + ch * DDRIOCCC_CH_OFFSET,
1056 mrc_post_code(0x03, 0x14);
1058 for (ch = 0; ch < NUM_CHANNELS; ch++) {
1059 if (mrc_params->channel_enables & (1 << ch)) {
1060 /* Host To Memory Clock Alignment (HMC) for 800/1066 */
1062 bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
1064 /* CLK_ALIGN_MOD_ID */
1065 mrc_alt_write_mask(DDRPHY,
1067 bl_grp * DDRIODQ_BL_OFFSET +
1068 ch * DDRIODQ_CH_OFFSET,
1073 mrc_alt_write_mask(DDRPHY,
1074 ECCCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
1076 mrc_alt_write_mask(DDRPHY,
1077 CMDCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
1079 mrc_alt_write_mask(DDRPHY,
1080 CCCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
1082 mrc_alt_write_mask(DDRPHY,
1083 CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET,
1086 * NUM_SAMPLES, MAX_SAMPLES,
1087 * MACRO_PI_STEP, MICRO_PI_STEP
1089 mrc_alt_write_mask(DDRPHY,
1090 CMDCLKALIGNREG1 + ch * DDRIOCCC_CH_OFFSET,
1091 (0x18 << 16) | (0x10 << 8) |
1092 (0x8 << 2) | (0x1 << 0),
1094 /* TOTAL_NUM_MODULES, FIRST_U_PARTITION */
1095 mrc_alt_write_mask(DDRPHY,
1096 CMDCLKALIGNREG2 + ch * DDRIOCCC_CH_OFFSET,
1097 (0x10 << 16) | (0x4 << 8) | (0x2 << 4),
1100 /* START_CLK_ALIGN=1 */
1101 mrc_alt_write_mask(DDRPHY,
1102 CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET,
1104 while (msg_port_alt_read(DDRPHY,
1105 CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET) &
1107 ; /* wait for START_CLK_ALIGN=0 */
1110 /* Set RD/WR Pointer Seperation & COUNTEN & FIFOPTREN */
1111 mrc_alt_write_mask(DDRPHY,
1112 CMDPTRREG + ch * DDRIOCCC_CH_OFFSET,
1113 1, 1); /* WRPTRENABLE=1 */
1116 /* enable bypass for CLK buffer (PO) */
1117 mrc_alt_write_mask(DDRPHY,
1118 COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
1120 /* Initial COMP Enable */
1121 mrc_alt_write_mask(DDRPHY, CMPCTRL, 1, 1);
1122 /* wait for Initial COMP Enable = 0 */
1123 while (msg_port_alt_read(DDRPHY, CMPCTRL) & 1)
1125 /* disable bypass for CLK buffer (PO) */
1126 mrc_alt_write_mask(DDRPHY,
1127 COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
1133 mrc_alt_write_mask(DDRPHY,
1134 CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
1135 1 << 2, 1 << 2); /* IOBUFACTRST_N=1 */
1137 /* DDRPHY initialization complete */
1138 mrc_alt_write_mask(DDRPHY,
1139 CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
1140 1 << 20, 1 << 20); /* SPID_INIT_COMPLETE=1 */
/*
 * perform_jedec_init() - run the JEDEC DDR3 initialization sequence
 *
 * Visible flow: pulse DDR3 RESET# via CCDDR3RESETCTL, read DTR0, set
 * CKE-mode for populated ranks and issue a NOP per rank, restore the DRMC
 * ODT mode, build the MR2/MR3/MR1/MR0 mode-register commands bit-by-bit
 * (CWL, SRT, Rtt_nom, DIC, CL, WR fields), then per populated rank send
 * EMRS2/EMRS3/EMRS1/MRS0 followed by ZQCL.
 *
 * NOTE(review): this span of the listing is corrupted. Interior lines were
 * dropped: declarations of dtr0/drp/drmc/tck and the emrs2_cmd/emrs3_cmd/
 * emrs1_cmd/mrs0_cmd initializations (including their DCMD opcode setup),
 * the 200us delay after RESET# assert, loop `continue`s, and the function
 * braces; each surviving line carries a stray line-number prefix. Lines
 * kept byte-identical; recover the full function from upstream.
 */
1147 /* This function performs JEDEC initialization on all enabled channels */
1148 void perform_jedec_init(struct mrc_params *mrc_params)
1150 uint8_t twr, wl, rank;
1162 /* jedec_init starts */
1163 mrc_post_code(0x04, 0x00);
1165 /* DDR3_RESET_SET=0, DDR3_RESET_RESET=1 */
1166 mrc_alt_write_mask(DDRPHY, CCDDR3RESETCTL, 2, 0x102);
1168 /* Assert RESET# for 200us */
/* NOTE(review): the delay call that implemented the 200us wait is missing */
1171 /* DDR3_RESET_SET=1, DDR3_RESET_RESET=0 */
1172 mrc_alt_write_mask(DDRPHY, CCDDR3RESETCTL, 0x100, 0x102);
1174 dtr0 = msg_port_read(MEM_CTLR, DTR0);
1177 * Set CKEVAL for populated ranks
1178 * then send NOP to each rank (#4550197)
1181 drp = msg_port_read(MEM_CTLR, DRP);
1184 drmc = msg_port_read(MEM_CTLR, DRMC);
1186 drmc |= (DRMC_CKEMODE | drp);
1188 msg_port_write(MEM_CTLR, DRMC, drmc);
1190 for (rank = 0; rank < NUM_RANKS; rank++) {
1191 /* Skip to next populated rank */
1192 if ((mrc_params->rank_enables & (1 << rank)) == 0)
1195 dram_init_command(DCMD_NOP(rank));
1198 msg_port_write(MEM_CTLR, DRMC,
1199 (mrc_params->rd_odt_value == 0 ? DRMC_ODTMODE : 0));
/* MR2 (EMRS2) field layout, per JEDEC JESD79-3 */
1203 * BIT[15:11] --> Always "0"
1204 * BIT[10:09] --> Rtt_WR: want "Dynamic ODT Off" (0)
1205 * BIT[08] --> Always "0"
1206 * BIT[07] --> SRT: use sr_temp_range
1207 * BIT[06] --> ASR: want "Manual SR Reference" (0)
1208 * BIT[05:03] --> CWL: use oem_tCWL
1209 * BIT[02:00] --> PASR: want "Full Array" (0)
1211 emrs2_cmd |= (2 << 3);
1212 wl = 5 + mrc_params->ddr_speed;
1213 emrs2_cmd |= ((wl - 5) << 9);
1214 emrs2_cmd |= (mrc_params->sr_temp_range << 13);
/* MR3 (EMRS3) field layout */
1218 * BIT[15:03] --> Always "0"
1219 * BIT[02] --> MPR: want "Normal Operation" (0)
1220 * BIT[01:00] --> MPR_Loc: want "Predefined Pattern" (0)
1222 emrs3_cmd |= (3 << 3);
/* MR1 (EMRS1) field layout */
1226 * BIT[15:13] --> Always "0"
1227 * BIT[12:12] --> Qoff: want "Output Buffer Enabled" (0)
1228 * BIT[11:11] --> TDQS: want "Disabled" (0)
1229 * BIT[10:10] --> Always "0"
1230 * BIT[09,06,02] --> Rtt_nom: use rtt_nom_value
1231 * BIT[08] --> Always "0"
1232 * BIT[07] --> WR_LVL: want "Disabled" (0)
1233 * BIT[05,01] --> DIC: use ron_value
1234 * BIT[04:03] --> AL: additive latency want "0" (0)
1235 * BIT[00] --> DLL: want "Enable" (0)
1237 * (BIT5|BIT1) set Ron value
1238 * 00 --> RZQ/6 (40ohm)
1239 * 01 --> RZQ/7 (34ohm)
1242 * (BIT9|BIT6|BIT2) set Rtt_nom value
1244 * 001 --> RZQ/4 ( 60ohm)
1245 * 010 --> RZQ/2 (120ohm)
1246 * 011 --> RZQ/6 ( 40ohm)
1249 emrs1_cmd |= (1 << 3);
1250 emrs1_cmd &= ~(1 << 6);
1252 if (mrc_params->ron_value == 0)
1253 emrs1_cmd |= (1 << 7);
1255 emrs1_cmd &= ~(1 << 7);
1257 if (mrc_params->rtt_nom_value == 0)
1258 emrs1_cmd |= (DDR3_EMRS1_RTTNOM_40 << 6);
1259 else if (mrc_params->rtt_nom_value == 1)
1260 emrs1_cmd |= (DDR3_EMRS1_RTTNOM_60 << 6);
1261 else if (mrc_params->rtt_nom_value == 2)
1262 emrs1_cmd |= (DDR3_EMRS1_RTTNOM_120 << 6);
1264 /* save MRS1 value (excluding control fields) */
1265 mrc_params->mrs1 = emrs1_cmd >> 6;
/* MR0 (MRS0) field layout */
1269 * BIT[15:13] --> Always "0"
1270 * BIT[12] --> PPD: for Quark (1)
1271 * BIT[11:09] --> WR: use oem_tWR
1272 * BIT[08] --> DLL: want "Reset" (1, self clearing)
1273 * BIT[07] --> MODE: want "Normal" (0)
1274 * BIT[06:04,02] --> CL: use oem_tCAS
1275 * BIT[03] --> RD_BURST_TYPE: want "Interleave" (1)
1276 * BIT[01:00] --> BL: want "8 Fixed" (0)
1287 * BIT[02:02] "0" if oem_tCAS <= 11 (1866?)
1288 * BIT[06:04] use oem_tCAS-4
1290 mrs0_cmd |= (1 << 14);
1291 mrs0_cmd |= (1 << 18);
/* CL field recovered from DTR0[14:12] programmed earlier (tcl - 5) */
1292 mrs0_cmd |= ((((dtr0 >> 12) & 7) + 1) << 10);
1294 tck = t_ck[mrc_params->ddr_speed];
1295 /* Per JEDEC: tWR=15000ps DDR2/3 from 800-1600 */
1296 twr = MCEIL(15000, tck);
1297 mrs0_cmd |= ((twr - 4) << 15);
/* issue MR2, MR3, MR1, MR0 then ZQCL to every populated rank */
1299 for (rank = 0; rank < NUM_RANKS; rank++) {
1300 /* Skip to next populated rank */
1301 if ((mrc_params->rank_enables & (1 << rank)) == 0)
1304 emrs2_cmd |= (rank << 22);
1305 dram_init_command(emrs2_cmd);
1307 emrs3_cmd |= (rank << 22);
1308 dram_init_command(emrs3_cmd);
1310 emrs1_cmd |= (rank << 22);
1311 dram_init_command(emrs1_cmd);
1313 mrs0_cmd |= (rank << 22);
1314 dram_init_command(mrs0_cmd);
1316 dram_init_command(DCMD_ZQCL(rank));
/*
 * set_ddr_init_complete() - tell the Dunit that MRC initialization is done
 *
 * Read-modify-write of the DCO register.
 *
 * NOTE(review): the two bit operations between the DCO read and write were
 * lost from this listing (upstream clears the PMI-control field and sets
 * the Init Complete bit — confirm exact bit names against the mrc.h
 * header); the local `dco` declaration and function braces are also
 * missing. Lines kept byte-identical.
 */
1323 * Dunit Initialization Complete
1325 * Indicates that initialization of the Dunit has completed.
1327 * Memory accesses are permitted and maintenance operation begins.
1328 * Until this bit is set to a 1, the memory controller will not accept
1329 * DRAM requests from the MEMORY_MANAGER or HTE.
1331 void set_ddr_init_complete(struct mrc_params *mrc_params)
1337 dco = msg_port_read(MEM_CTLR, DCO);
1340 msg_port_write(MEM_CTLR, DCO, dco);
1346 * This function will retrieve relevant timing data
1348 * This data will be used on subsequent boots to speed up boot times
1349 * and is required for Suspend To RAM capabilities.
1351 void restore_timings(struct mrc_params *mrc_params)
1354 const struct mrc_timings *mt = &mrc_params->timings;
1356 for (ch = 0; ch < NUM_CHANNELS; ch++) {
1357 for (rk = 0; rk < NUM_RANKS; rk++) {
1358 for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
1359 set_rcvn(ch, rk, bl, mt->rcvn[ch][rk][bl]);
1360 set_rdqs(ch, rk, bl, mt->rdqs[ch][rk][bl]);
1361 set_wdqs(ch, rk, bl, mt->wdqs[ch][rk][bl]);
1362 set_wdq(ch, rk, bl, mt->wdq[ch][rk][bl]);
1364 /* VREF (RANK0 only) */
1365 set_vref(ch, bl, mt->vref[ch][bl]);
1368 set_wctl(ch, rk, mt->wctl[ch][rk]);
1370 set_wcmd(ch, mt->wcmd[ch]);
/*
 * Configure default settings normally set as part of read training
 *
 * Some defaults have to be set earlier as they may affect earlier
 * training steps.
 */
void default_timings(struct mrc_params *mrc_params)
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		for (rk = 0; rk < NUM_RANKS; rk++) {
			for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
				/* default read DQS delay (PI ticks) */
				set_rdqs(ch, rk, bl, 24);
			/* VREF (RANK0 only): mid-scale default */
			set_vref(ch, bl, 32);
/*
 * This function will perform our RCVEN Calibration Algorithm.
 * We will only use the 2xCLK domain timings to perform RCVEN Calibration.
 * All byte lanes will be calibrated "simultaneously" per channel per rank.
 */
void rcvn_cal(struct mrc_params *mrc_params)
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	/* x16 parts expose half as many byte lanes as x8 parts */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;

	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];

	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;

	/* absolute PI value to be programmed on the byte lane */
	uint32_t delay[NUM_BYTE_LANES];
	u32 dtr1, dtr1_save;

	/* rcvn_cal starts */
	mrc_post_code(0x05, 0x00);

	/* need separate burst to sample DQS preamble */
	dtr1 = msg_port_read(MEM_CTLR, DTR1);
	/* NOTE(review): the dtr1_save assignment is not visible in this view */
	dtr1 |= DTR1_TCCD_12CLK;
	msg_port_write(MEM_CTLR, DTR1, dtr1);

	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));

	/* loop through each enabled channel */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* perform RCVEN Calibration on a per rank basis */
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					/*
					 * POST_CODE here indicates the current
					 * channel and rank being calibrated
					 */
					mrc_post_code(0x05, 0x10 + ((ch << 4) | rk));

					/* Set hard-coded timing values */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++)
						set_rcvn(ch, rk, bl, ddr_rcvn[PLATFORM_ID]);

					/* enable FIFORST */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl += 2) {
						/*
						 * NOTE(review): register name and mask/value
						 * arguments are elided in this view
						 */
						mrc_alt_write_mask(DDRPHY,
							(bl >> 1) * DDRIODQ_BL_OFFSET +
							ch * DDRIODQ_CH_OFFSET,
					/* initialize the starting delay to 128 PI (cas +1 CLK) */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						/* 1x CLK domain timing is cas-4 */
						delay[bl] = (4 + 1) * FULL_CLK;
						set_rcvn(ch, rk, bl, delay[bl]);

					/* now find the rising edge */
					find_rising_edge(mrc_params, delay, ch, rk, true);

					/* Now increase delay by 32 PI (1/4 CLK) to place in center of high pulse */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] += QRTR_CLK;
						set_rcvn(ch, rk, bl, delay[bl]);

					/* Now decrement delay by 128 PI (1 CLK) until we sample a "0" */
						temp = sample_dqs(mrc_params, ch, rk, true);
						for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
							if (temp & (1 << bl)) {
								if (delay[bl] >= FULL_CLK) {
									delay[bl] -= FULL_CLK;
									set_rcvn(ch, rk, bl, delay[bl]);
								/* not enough delay: report and halt training */
								training_message(ch, rk, bl);
								mrc_post_code(0xee, 0x50);
					} while (temp & 0xff);

					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;

					/* Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] += QRTR_CLK;
						/* add "delay[]" values to "final_delay[][]" for rolling average */
						final_delay[ch][bl] += delay[bl];
						/* set timing based on rolling average values */
						set_rcvn(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);

					/* Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] += QRTR_CLK;
						set_rcvn(ch, rk, bl, delay[bl]);

					/* disable FIFORST */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl += 2) {
						mrc_alt_write_mask(DDRPHY,
							(bl >> 1) * DDRIODQ_BL_OFFSET +
							ch * DDRIODQ_CH_OFFSET,

	/* restore original DTR1 */
	msg_port_write(MEM_CTLR, DTR1, dtr1_save);
/*
 * This function will perform the Write Levelling algorithm
 * (align WCLK and WDQS).
 *
 * This algorithm will act on each rank in each channel separately.
 */
void wr_level(struct mrc_params *mrc_params)
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	/* x16 parts expose half as many byte lanes as x8 parts */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;

	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];

	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;

	/* determines stop condition for CRS_WR_LVL */
	bool all_edges_found;
	/* absolute PI value to be programmed on the byte lane */
	uint32_t delay[NUM_BYTE_LANES];
	/*
	 * static makes it so the data is loaded in the heap once by shadow(),
	 * where non-static copies the data onto the stack every time this
	 * function is called
	 */
	uint32_t address;	/* address to be checked during COARSE_WR_LVL */
	u32 dtr4, dtr4_save;

	/* wr_level starts */
	mrc_post_code(0x06, 0x00);

	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));

	/* loop through each enabled channel */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* perform WRITE LEVELING algorithm on a per rank basis */
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					/*
					 * POST_CODE here indicates the current
					 * rank and channel being calibrated
					 */
					mrc_post_code(0x06, 0x10 + ((ch << 4) | rk));

					/* initialize WDQS/WDQ to platform defaults */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						set_wdqs(ch, rk, bl, ddr_wdqs[PLATFORM_ID]);
						set_wdq(ch, rk, bl, ddr_wdqs[PLATFORM_ID] - QRTR_CLK);

					/*
					 * perform a single PRECHARGE_ALL command to
					 * make DRAM state machine go to IDLE state
					 */
					dram_init_command(DCMD_PREA(rk));

					/*
					 * enable Write Levelling Mode
					 * (EMRS1 w/ Write Levelling Mode Enable)
					 */
					dram_init_command(DCMD_MRS1(rk, 0x82));

					/*
					 * set ODT DRAM Full Time Termination
					 */
					dtr4 = msg_port_read(MEM_CTLR, DTR4);
					/* NOTE(review): the dtr4_save assignment is not visible in this view */
					dtr4 |= DTR4_ODTDIS;
					msg_port_write(MEM_CTLR, DTR4, dtr4);

					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
						/*
						 * Enable Sandy Bridge Mode (WDQ Tri-State) &
						 * Ensure 5 WDQS pulses during Write Leveling
						 */
						mrc_alt_write_mask(DDRPHY,
							DQCTL + DDRIODQ_BL_OFFSET * bl + DDRIODQ_CH_OFFSET * ch,

					/* Write Leveling Mode enabled in IO */
					mrc_alt_write_mask(DDRPHY,
						CCDDR3RESETCTL + DDRIOCCC_CH_OFFSET * ch,

					/* Initialize the starting delay to WCLK */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] = get_wclk(ch, rk);
						set_wdqs(ch, rk, bl, delay[bl]);

					/* now find the rising edge */
					find_rising_edge(mrc_params, delay, ch, rk, false);

					/* disable Write Levelling Mode */
					mrc_alt_write_mask(DDRPHY,
						CCDDR3RESETCTL + DDRIOCCC_CH_OFFSET * ch,

					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
						/* Disable Sandy Bridge Mode & Ensure 4 WDQS pulses during normal operation */
						mrc_alt_write_mask(DDRPHY,
							DQCTL + DDRIODQ_BL_OFFSET * bl + DDRIODQ_CH_OFFSET * ch,

					/* restore original DTR4 */
					msg_port_write(MEM_CTLR, DTR4, dtr4_save);

					/*
					 * restore original value
					 * (Write Levelling Mode Disable)
					 */
					dram_init_command(DCMD_MRS1(rk, mrc_params->mrs1));

					/*
					 * perform a single PRECHARGE_ALL command to
					 * make DRAM state machine go to IDLE state
					 */
					dram_init_command(DCMD_PREA(rk));

					mrc_post_code(0x06, 0x30 + ((ch << 4) | rk));

					/*
					 * COARSE WRITE LEVEL:
					 * check that we're on the correct clock edge
					 */

					/* hte reconfiguration request */
					mrc_params->hte_setup = 1;

					/* start CRS_WR_LVL with WDQS = WDQS + 128 PI */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] = get_wdqs(ch, rk, bl) + FULL_CLK;
						set_wdqs(ch, rk, bl, delay[bl]);
						/*
						 * program WDQ timings based on WDQS
						 * (WDQ = WDQS - 32 PI)
						 */
						set_wdq(ch, rk, bl, (delay[bl] - QRTR_CLK));

					/* get an address in the targeted channel/rank */
					address = get_addr(ch, rk);

					uint32_t coarse_result = 0x00;
					uint32_t coarse_result_mask = byte_lane_mask(mrc_params);
					/* assume success until a lane fails below */
					all_edges_found = true;
					mrc_params->hte_setup = 1;
					coarse_result = check_rw_coarse(mrc_params, address);

					/* check for failures and margin the byte lane back 128 PI (1 CLK) */
					for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
						if (coarse_result & (coarse_result_mask << bl)) {
							all_edges_found = false;
							delay[bl] -= FULL_CLK;
							set_wdqs(ch, rk, bl, delay[bl]);
							/* program WDQ timings based on WDQS (WDQ = WDQS - 32 PI) */
							set_wdq(ch, rk, bl, delay[bl] - QRTR_CLK);
					} while (!all_edges_found);

					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;
					/* accumulate "final_delay[][]" values from "delay[]" values for rolling average */
					for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
						final_delay[ch][bl] += delay[bl];
						set_wdqs(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
						/* program WDQ timings based on WDQS (WDQ = WDQS - 32 PI) */
						set_wdq(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled - QRTR_CLK);
/*
 * Configure the MCU page-close policy: program the page-close timeout
 * field and enable precharge power-down in DPMC0.
 */
void prog_page_ctrl(struct mrc_params *mrc_params)
	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
	/* clear the page-close timeout field before programming it */
	dpmc0 &= ~DPMC0_PCLSTO_MASK;
	/*
	 * NOTE(review): the timeout value written into the PCLSTO field is
	 * not visible in this view — confirm against the full source.
	 */
	dpmc0 |= DPMC0_PREAPWDEN;
	msg_port_write(MEM_CTLR, DPMC0, dpmc0);
/*
 * This function will perform the READ TRAINING Algorithm on all
 * channels/ranks/byte_lanes simultaneously to minimize execution time.
 *
 * The idea here is to train the VREF and RDQS (and eventually RDQ) values
 * to achieve maximum READ margins. The algorithm will first determine the
 * X coordinate (RDQS setting). This is done by collapsing the VREF eye
 * until we find a minimum required RDQS eye for VREF_MIN and VREF_MAX.
 * Then we take the averages of the RDQS eye at VREF_MIN and VREF_MAX,
 * then average those; this will be the final X coordinate. The algorithm
 * will then determine the Y coordinate (VREF setting). This is done by
 * collapsing the RDQS eye until we find a minimum required VREF eye for
 * RDQS_MIN and RDQS_MAX. Then we take the averages of the VREF eye at
 * RDQS_MIN and RDQS_MAX, then average those; this will be the final Y
 * coordinate.
 *
 * NOTE: this algorithm assumes the eye curves have a one-to-one relationship,
 * meaning for each X the curve has only one Y and vice-a-versa.
 */
void rd_train(struct mrc_params *mrc_params)
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
	uint8_t side_x;	/* tracks LEFT/RIGHT approach vectors */
	uint8_t side_y;	/* tracks BOTTOM/TOP approach vectors */
	/* X coordinate data (passing RDQS values) for approach vectors */
	uint8_t x_coordinate[2][2][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
	/* Y coordinate data (passing VREF values) for approach vectors */
	uint8_t y_coordinate[2][2][NUM_CHANNELS][NUM_BYTE_LANES];
	/* centered X (RDQS) */
	uint8_t x_center[NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
	/* centered Y (VREF) */
	uint8_t y_center[NUM_CHANNELS][NUM_BYTE_LANES];
	uint32_t address;	/* target address for check_bls_ex() */
	uint32_t result;	/* result of check_bls_ex() */
	uint32_t bl_mask;	/* byte lane mask for result checking */

	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;

	/* rd_train starts */
	mrc_post_code(0x07, 0x00);

	/* set hard-coded platform default RDQS for every enabled lane */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					bl < NUM_BYTE_LANES / bl_divisor;
					set_rdqs(ch, rk, bl, ddr_rdqs[PLATFORM_ID]);

	/* initialize x/y_coordinate arrays */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					bl < NUM_BYTE_LANES / bl_divisor;
					/* RDQS (X) search starts from the outer edges */
					x_coordinate[L][B][ch][rk][bl] = RDQS_MIN;
					x_coordinate[R][B][ch][rk][bl] = RDQS_MAX;
					x_coordinate[L][T][ch][rk][bl] = RDQS_MIN;
					x_coordinate[R][T][ch][rk][bl] = RDQS_MAX;
					/* VREF (Y) search starts from the outer edges */
					y_coordinate[L][B][ch][bl] = VREF_MIN;
					y_coordinate[R][B][ch][bl] = VREF_MIN;
					y_coordinate[L][T][ch][bl] = VREF_MAX;
					y_coordinate[R][T][ch][bl] = VREF_MAX;

	/* initialize other variables */
	bl_mask = byte_lane_mask(mrc_params);
	address = get_addr(0, 0);

	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));

	/* look for passing coordinates */
	for (side_y = B; side_y <= T; side_y++) {
		for (side_x = L; side_x <= R; side_x++) {
			mrc_post_code(0x07, 0x10 + side_y * 2 + side_x);

			/* find passing values */
			for (ch = 0; ch < NUM_CHANNELS; ch++) {
				if (mrc_params->channel_enables & (0x1 << ch)) {
					for (rk = 0; rk < NUM_RANKS; rk++) {
						if (mrc_params->rank_enables &
							/* set x/y_coordinate search starting settings */
							bl < NUM_BYTE_LANES / bl_divisor;
							set_rdqs(ch, rk, bl,
								x_coordinate[side_x][side_y][ch][rk][bl]);
								y_coordinate[side_x][side_y][ch][bl]);

							/* get an address in the target channel/rank */
							address = get_addr(ch, rk);

							/* request HTE reconfiguration */
							mrc_params->hte_setup = 1;

							/* test the settings */
							/* result[07:00] == failing byte lane (MAX 8) */
							result = check_bls_ex(mrc_params, address);

							/* check for failures */
							if (result & 0xff) {
								/* at least 1 byte lane failed */
								for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
									/* adjust the RDQS values accordingly */
									x_coordinate[L][side_y][ch][rk][bl] += RDQS_STEP;
									x_coordinate[R][side_y][ch][rk][bl] -= RDQS_STEP;

									/* check that we haven't closed the RDQS_EYE too much */
									if ((x_coordinate[L][side_y][ch][rk][bl] > (RDQS_MAX - MIN_RDQS_EYE)) ||
									    (x_coordinate[R][side_y][ch][rk][bl] < (RDQS_MIN + MIN_RDQS_EYE)) ||
									    (x_coordinate[L][side_y][ch][rk][bl] ==
									    x_coordinate[R][side_y][ch][rk][bl])) {
										/*
										 * not enough RDQS margin available at this VREF
										 * update VREF values accordingly
										 */
										y_coordinate[side_x][B][ch][bl] += VREF_STEP;
										y_coordinate[side_x][T][ch][bl] -= VREF_STEP;

										/* check that we haven't closed the VREF_EYE too much */
										if ((y_coordinate[side_x][B][ch][bl] > (VREF_MAX - MIN_VREF_EYE)) ||
										    (y_coordinate[side_x][T][ch][bl] < (VREF_MIN + MIN_VREF_EYE)) ||
										    (y_coordinate[side_x][B][ch][bl] == y_coordinate[side_x][T][ch][bl])) {
											/* VREF_EYE collapsed below MIN_VREF_EYE */
											training_message(ch, rk, bl);
											mrc_post_code(0xEE, 0x70 + side_y * 2 + side_x);
										/* update the VREF setting */
										set_vref(ch, bl, y_coordinate[side_x][side_y][ch][bl]);
										/* reset the X coordinate to begin the search at the new VREF */
										x_coordinate[side_x][side_y][ch][rk][bl] =
											(side_x == L) ? RDQS_MIN : RDQS_MAX;

									/* update the RDQS setting */
									set_rdqs(ch, rk, bl, x_coordinate[side_x][side_y][ch][rk][bl]);
							} while (result & 0xff);

	mrc_post_code(0x07, 0x20);

	/* find final RDQS (X coordinate) & final VREF (Y coordinate) */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						/* debug print of the passing RDQS eye edges */
						"RDQS T/B eye rank%d lane%d : %d-%d %d-%d\n",
						x_coordinate[L][T][ch][rk][bl],
						x_coordinate[R][T][ch][rk][bl],
						x_coordinate[L][B][ch][rk][bl],
						x_coordinate[R][B][ch][rk][bl]);

						/* average the TOP side LEFT & RIGHT values */
						temp1 = (x_coordinate[R][T][ch][rk][bl] + x_coordinate[L][T][ch][rk][bl]) / 2;
						/* average the BOTTOM side LEFT & RIGHT values */
						temp2 = (x_coordinate[R][B][ch][rk][bl] + x_coordinate[L][B][ch][rk][bl]) / 2;
						/* average the above averages */
						x_center[ch][rk][bl] = (uint8_t) ((temp1 + temp2) / 2);

						/* debug print of the passing VREF eye edges */
						"VREF R/L eye lane%d : %d-%d %d-%d\n",
						y_coordinate[R][B][ch][bl],
						y_coordinate[R][T][ch][bl],
						y_coordinate[L][B][ch][bl],
						y_coordinate[L][T][ch][bl]);

						/* average the RIGHT side TOP & BOTTOM values */
						temp1 = (y_coordinate[R][T][ch][bl] + y_coordinate[R][B][ch][bl]) / 2;
						/* average the LEFT side TOP & BOTTOM values */
						temp2 = (y_coordinate[L][T][ch][bl] + y_coordinate[L][B][ch][bl]) / 2;
						/* average the above averages */
						y_center[ch][bl] = (uint8_t) ((temp1 + temp2) / 2);

	/* perform an eye check */
	for (side_y = B; side_y <= T; side_y++) {
		for (side_x = L; side_x <= R; side_x++) {
			mrc_post_code(0x07, 0x30 + side_y * 2 + side_x);

			/* update the settings for the eye check */
			for (ch = 0; ch < NUM_CHANNELS; ch++) {
				if (mrc_params->channel_enables & (1 << ch)) {
					for (rk = 0; rk < NUM_RANKS; rk++) {
						if (mrc_params->rank_enables & (1 << rk)) {
							for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
								/* offset center by half the minimum eye, per side */
								set_rdqs(ch, rk, bl, x_center[ch][rk][bl] - (MIN_RDQS_EYE / 2));
								set_rdqs(ch, rk, bl, x_center[ch][rk][bl] + (MIN_RDQS_EYE / 2));
								set_vref(ch, bl, y_center[ch][bl] - (MIN_VREF_EYE / 2));
								set_vref(ch, bl, y_center[ch][bl] + (MIN_VREF_EYE / 2));

			/* request HTE reconfiguration */
			mrc_params->hte_setup = 1;

			/* verify the margined settings still pass */
			if (check_bls_ex(mrc_params, address) & 0xff) {
				/* one or more byte lanes failed */
				mrc_post_code(0xee, 0x74 + side_x * 2 + side_y);

	mrc_post_code(0x07, 0x40);

	/* set final placements */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;

					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						/* rank2rank sharing: keep a rolling average across ranks */
						final_delay[ch][bl] += x_center[ch][rk][bl];
						set_rdqs(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
						set_rdqs(ch, rk, bl, x_center[ch][rk][bl]);
						set_vref(ch, bl, y_center[ch][bl]);
/*
 * This function will perform the WRITE TRAINING Algorithm on all
 * channels/ranks/byte_lanes simultaneously to minimize execution time.
 *
 * The idea here is to train the WDQ timings to achieve maximum WRITE margins.
 * The algorithm will start with WDQ at the current WDQ setting (tracks WDQS
 * in WR_LVL) +/- 32 PIs (+/- 1/4 CLK) and collapse the eye until all data
 * patterns pass. This is because WDQS will be aligned to WCLK by the
 * Write Leveling algorithm and WDQ will only ever have a 1/2 CLK window
 * of validity.
 */
void wr_train(struct mrc_params *mrc_params)
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
	uint8_t side;	/* LEFT/RIGHT side indicator (0=L, 1=R) */
	uint32_t temp;	/* temporary DWORD */
	/* 2 arrays, for L & R side passing delays */
	uint32_t delay[2][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
	uint32_t address;	/* target address for check_bls_ex() */
	uint32_t result;	/* result of check_bls_ex() */
	uint32_t bl_mask;	/* byte lane mask for result checking */

	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;

	/* wr_train starts */
	mrc_post_code(0x08, 0x00);

	/* set hard-coded platform default WDQ for every enabled lane */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					bl < NUM_BYTE_LANES / bl_divisor;
					set_wdq(ch, rk, bl, ddr_wdq[PLATFORM_ID]);

	/* initialize "delay" */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					bl < NUM_BYTE_LANES / bl_divisor;
					/*
					 * want to start with
					 * WDQ = (WDQS - QRTR_CLK)
					 * +/- QRTR_CLK
					 */
					temp = get_wdqs(ch, rk, bl) - QRTR_CLK;
					delay[L][ch][rk][bl] = temp - QRTR_CLK;
					delay[R][ch][rk][bl] = temp + QRTR_CLK;

	/* initialize other variables */
	bl_mask = byte_lane_mask(mrc_params);
	address = get_addr(0, 0);

	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));

	/*
	 * start algorithm on the LEFT side and train each channel/bl
	 * until no failures are observed, then repeat for the RIGHT side.
	 */
	for (side = L; side <= R; side++) {
		mrc_post_code(0x08, 0x10 + side);

		/* set starting values */
		for (ch = 0; ch < NUM_CHANNELS; ch++) {
			if (mrc_params->channel_enables & (1 << ch)) {
				for (rk = 0; rk < NUM_RANKS; rk++) {
					if (mrc_params->rank_enables &
						bl < NUM_BYTE_LANES / bl_divisor;
						set_wdq(ch, rk, bl, delay[side][ch][rk][bl]);

		/* find passing values */
		for (ch = 0; ch < NUM_CHANNELS; ch++) {
			if (mrc_params->channel_enables & (1 << ch)) {
				for (rk = 0; rk < NUM_RANKS; rk++) {
					if (mrc_params->rank_enables &
						/* get an address in the target channel/rank */
						address = get_addr(ch, rk);

						/* request HTE reconfiguration */
						mrc_params->hte_setup = 1;

						/* check the settings */
						/* result[07:00] == failing byte lane (MAX 8) */
						result = check_bls_ex(mrc_params, address);
						/* check for failures */
						if (result & 0xff) {
							/* at least 1 byte lane failed */
							for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
								/* collapse the eye from both sides */
								delay[L][ch][rk][bl] += WDQ_STEP;
								delay[R][ch][rk][bl] -= WDQ_STEP;

								/* check for algorithm failure */
								if (delay[L][ch][rk][bl] != delay[R][ch][rk][bl]) {
									/*
									 * update delay setting
									 */
									delay[side][ch][rk][bl]);
									/*
									 * no margin available
									 * notify the user and halt
									 */
									training_message(ch, rk, bl);
									mrc_post_code(0xee, 0x80 + side);
						/* stop when all byte lanes pass */
						} while (result & 0xff);

	/* program WDQ to the middle of passing window */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;

					for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
						/* debug print of the passing WDQ eye edges */
						"WDQ eye rank%d lane%d : %d-%d\n",
						delay[L][ch][rk][bl],
						delay[R][ch][rk][bl]);

						/* center of the passing window */
						temp = (delay[R][ch][rk][bl] + delay[L][ch][rk][bl]) / 2;

						/* rank2rank sharing: keep a rolling average across ranks */
						final_delay[ch][bl] += temp;
						final_delay[ch][bl] / num_ranks_enabled);
						set_wdq(ch, rk, bl, temp);
/*
 * This function will store relevant timing data
 *
 * This data will be used on subsequent boots to speed up boot times
 * and is required for Suspend To RAM capabilities.
 *
 * Counterpart of restore_timings(): reads every trained delay out of the
 * DDR PHY into mrc_params->timings.
 */
void store_timings(struct mrc_params *mrc_params)
	struct mrc_timings *mt = &mrc_params->timings;

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		for (rk = 0; rk < NUM_RANKS; rk++) {
			for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
				/* per-lane receive-enable / read DQS / write DQS / write DQ delays */
				mt->rcvn[ch][rk][bl] = get_rcvn(ch, rk, bl);
				mt->rdqs[ch][rk][bl] = get_rdqs(ch, rk, bl);
				mt->wdqs[ch][rk][bl] = get_wdqs(ch, rk, bl);
				mt->wdq[ch][rk][bl] = get_wdq(ch, rk, bl);
			/* VREF (RANK0 only) */
			mt->vref[ch][bl] = get_vref(ch, bl);
		/* per-rank write control delay */
		mt->wctl[ch][rk] = get_wctl(ch, rk);
	/* per-channel write command delay */
	mt->wcmd[ch] = get_wcmd(ch);

	/* need to save for a case of changing frequency after warm reset */
	mt->ddr_speed = mrc_params->ddr_speed;
/*
 * The purpose of this function is to ensure the SEC comes out of reset
 * and IA initiates the SEC enabling Memory Scrambling.
 */
void enable_scrambling(struct mrc_params *mrc_params)
	/* nothing to do when scrambling is disabled */
	if (mrc_params->scrambling_enables == 0)

	/* 32 bit seed is always stored in BIOS NVM */
	lfsr = mrc_params->timings.scrambler_seed;

	if (mrc_params->boot_mode == BM_COLD) {
		/*
		 * factory value is 0 and in first boot,
		 * a clock based seed is loaded.
		 */
		/*
		 * get seed from system clock
		 * and make sure it is not all 1's
		 */
		lfsr = rdtsc() & 0x0fffffff;
		/*
		 * Need to replace scrambler
		 *
		 * get next 32bit LFSR 16 times which is the last
		 * part of the previous scrambler vector
		 */
		for (i = 0; i < 16; i++)
		/* NOTE(review): loop body (LFSR advance) elided in this view */

		/* save the new seed for subsequent boots */
		mrc_params->timings.scrambler_seed = lfsr;

	/*
	 * In warm boot or S3 exit, we have the previous seed.
	 * In cold boot, we have the last 32bit LFSR which is the new seed.
	 */
	lfsr32(&lfsr);	/* shift to next value */
	msg_port_write(MEM_CTLR, SCRMSEED, (lfsr & 0x0003ffff));

	for (i = 0; i < 2; i++)
		msg_port_write(MEM_CTLR, SCRMLO + i, (lfsr & 0xaaaaaaaa));
/*
 * Configure MCU Power Management Control Register
 * and Scheduler Control Register
 */
void prog_ddr_control(struct mrc_params *mrc_params)
	dsch = msg_port_read(MEM_CTLR, DSCH);
	/* allow out-of-order scheduling and the new-request bypass path */
	dsch &= ~(DSCH_OOODIS | DSCH_OOOST3DIS | DSCH_NEWBYPDIS);
	msg_port_write(MEM_CTLR, DSCH, dsch);

	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
	dpmc0 &= ~DPMC0_DISPWRDN;
	/* honor the board-level power-down-disable policy (bit 25) */
	dpmc0 |= (mrc_params->power_down_disable << 25);
	/* enable clock gating and re-program the page-close timeout field */
	dpmc0 &= ~DPMC0_CLKGTDIS;
	dpmc0 &= ~DPMC0_PCLSTO_MASK;
	dpmc0 |= DPMC0_PREAPWDEN;
	msg_port_write(MEM_CTLR, DPMC0, dpmc0);

	/* CMDTRIST = 2h - CMD/ADDR are tristated when no valid command */
	mrc_write_mask(MEM_CTLR, DPMC1, 0x20, 0x30);
/*
 * After training complete configure MCU Rank Population Register
 * specifying: ranks enabled, device width, density, address mode
 */
void prog_dra_drb(struct mrc_params *mrc_params)
	u8 density = mrc_params->params.density;

	/* unlock the MCU before modifying the rank population register */
	dco = msg_port_read(MEM_CTLR, DCO);
	msg_port_write(MEM_CTLR, DCO, dco);

	/*
	 * NOTE(review): the drp bit assignments for each rank-enable and
	 * dram-width case are elided in this view — confirm against the
	 * full source.
	 */
	if (mrc_params->rank_enables & 1)
	if (mrc_params->rank_enables & 2)
	if (mrc_params->dram_width == X16) {

	/*
	 * Density encoding in struct dram_params: 0=512Mb, 1=Gb, 2=2Gb, 3=4Gb
	 * has to be mapped RANKDENSx encoding (0=1Gb)
	 */
	drp |= ((density - 1) << 6);
	drp |= ((density - 1) << 11);

	/* Address mode can be overwritten if ECC enabled */
	drp |= (mrc_params->address_mode << 14);

	msg_port_write(MEM_CTLR, DRP, drp);

	/* re-lock the MCU */
	msg_port_write(MEM_CTLR, DCO, dco);
/* Send DRAM wake command */
void perform_wake(struct mrc_params *mrc_params)
	/* delegate to the common wake helper; mrc_params is unused here */
	dram_wake_command();
/*
 * Configure refresh rate and short ZQ calibration interval
 * Activate dynamic self refresh
 */
void change_refresh_period(struct mrc_params *mrc_params)
	drfc = msg_port_read(MEM_CTLR, DRFC);
	drfc &= ~DRFC_TREFI_MASK;
	/* refresh_rate selects the tREFI encoding (field starts at bit 12) */
	drfc |= (mrc_params->refresh_rate << 12);
	/* clear the refresh debt counter */
	drfc |= DRFC_REFDBTCLR;
	msg_port_write(MEM_CTLR, DRFC, drfc);

	dcal = msg_port_read(MEM_CTLR, DCAL);
	dcal &= ~DCAL_ZQCINT_MASK;
	dcal |= (3 << 8);	/* 63ms */
	msg_port_write(MEM_CTLR, DCAL, dcal);

	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
	/* enable dynamic self refresh and PHY clock gating */
	dpmc0 |= (DPMC0_DYNSREN | DPMC0_ENPHYCLKGATE);
	msg_port_write(MEM_CTLR, DPMC0, dpmc0);
/*
 * Configure DDRPHY for Auto-Refresh, Periodic Compensations,
 * Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down
 */
void set_auto_refresh(struct mrc_params *mrc_params)
	uint32_t bl_divisor = 1;

	/*
	 * Enable Auto-Refresh, Periodic Compensations, Dynamic Diff-Amp,
	 * ZQSPERIOD, Auto-Precharge, CKE Power-Down
	 */
	for (channel = 0; channel < NUM_CHANNELS; channel++) {
		if (mrc_params->channel_enables & (1 << channel)) {
			/* Enable Periodic RCOMPS */
			mrc_alt_write_mask(DDRPHY, CMPCTRL, 2, 2);

			/* Enable Dynamic DiffAmp & Set Read ODT Value */
			switch (mrc_params->rd_odt_value) {
				/* NOTE(review): case labels elided in this view */
				temp = 0x3f;	/* OFF */
				temp = 0x00;	/* Auto */

			for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
				/* Override: DIFFAMP, ODT (byte lane 0 of the pair) */
				mrc_alt_write_mask(DDRPHY,
					B0OVRCTL + bl * DDRIODQ_BL_OFFSET +
					channel * DDRIODQ_CH_OFFSET,

				/* Override: DIFFAMP, ODT (byte lane 1 of the pair) */
				mrc_alt_write_mask(DDRPHY,
					B1OVRCTL + bl * DDRIODQ_BL_OFFSET +
					channel * DDRIODQ_CH_OFFSET,

	/* Issue ZQCS command */
	for (rank = 0; rank < NUM_RANKS; rank++) {
		if (mrc_params->rank_enables & (1 << rank))
			dram_init_command(DCMD_ZQCS(rank));
/*
 * Depending on configuration enables ECC support
 *
 * Available memory size is decreased, and updated with 0s
 * in order to clear error status. Address mode 2 forced.
 */
void ecc_enable(struct mrc_params *mrc_params)
	/* nothing to do when ECC is disabled */
	if (mrc_params->ecc_enables == 0)

	/* Configuration required in ECC mode */
	drp = msg_port_read(MEM_CTLR, DRP);
	drp &= ~DRP_ADDRMAP_MASK;
	/* force the ECC-compatible address map and 64B split priority */
	drp |= DRP_ADDRMAP_MAP1;
	drp |= DRP_PRI64BSPLITEN;
	msg_port_write(MEM_CTLR, DRP, drp);

	/* Disable new request bypass */
	dsch = msg_port_read(MEM_CTLR, DSCH);
	dsch |= DSCH_NEWBYPDIS;
	msg_port_write(MEM_CTLR, DSCH, dsch);

	/* enable single/double-bit error handling and check-bit generation */
	ecc_ctrl = (DECCCTRL_SBEEN | DECCCTRL_DBEEN | DECCCTRL_ENCBGEN);
	msg_port_write(MEM_CTLR, DECCCTRL, ecc_ctrl);

	/* Assume 8 bank memory, one bank is gone for ECC */
	mrc_params->mem_size -= mrc_params->mem_size / 8;

	/* For S3 resume memory content has to be preserved */
	if (mrc_params->boot_mode != BM_S3) {
		/* zero-fill memory to initialize the ECC check bits */
		hte_mem_init(mrc_params, MRC_MEM_INIT);
/*
 * Execute memory test
 * if error detected it is indicated in mrc_params->status
 */
void memory_test(struct mrc_params *mrc_params)
	uint32_t result = 0;

	/* run the HTE-based memory test over the trained memory */
	result = hte_mem_init(mrc_params, MRC_MEM_TEST);

	DPF(D_INFO, "Memory test result %x\n", result);
	/* any non-zero result means at least one compare failed */
	mrc_params->status = ((result == 0) ? MRC_SUCCESS : MRC_E_MEMTEST);
/* Lock MCU registers at the end of initialization sequence */
void lock_registers(struct mrc_params *mrc_params)
	dco = msg_port_read(MEM_CTLR, DCO);
	/* clear PMI control/disable, then set the DRP and CPGC lock bits */
	dco &= ~(DCO_PMICTL | DCO_PMIDIS);
	dco |= (DCO_DRPLOCK | DCO_CPGCLOCK);
	msg_port_write(MEM_CTLR, DCO, dco);