2 * Copyright (C) 2013, Intel Corporation
3 * Copyright (C) 2015, Bin Meng <bmeng.cn@gmail.com>
5 * Ported from Intel released Quark UEFI BIOS
6 * QuarkSocPkg/QuarkNorthCluster/MemoryInit/Pei
8 * SPDX-License-Identifier: Intel
13 #include <asm/arch/device.h>
14 #include <asm/arch/mrc.h>
15 #include <asm/arch/msg_port.h>
20 /* t_ck clock period in picoseconds per speed index 800, 1066, 1333 */
21 static const uint32_t t_ck[3] = {
/* NOTE(review): the three initializer values (one per speed index) are on lines elided from this excerpt */
27 /* Global variables */
/*
 * Static per-platform calibration tables. Each table has two entries and is
 * indexed by PLATFORM_ID elsewhere in this file (e.g. ddr_wcmd[PLATFORM_ID],
 * ddr_rcvn[PLATFORM_ID]).
 * NOTE(review): units are presumably PI delay ticks used by the set_* delay
 * helpers (set_wclk/set_wctl/set_wcmd/set_rcvn/...) — confirm against mrc.h.
 */
28 static const uint16_t ddr_wclk[] = {193, 158};
30 static const uint16_t ddr_wctl[] = {1, 217};
33 static const uint16_t ddr_wcmd[] = {1, 220};
37 static const uint16_t ddr_rcvn[] = {129, 498};
41 static const uint16_t ddr_wdqs[] = {65, 289};
45 static const uint8_t ddr_rdqs[] = {32, 24};
49 static const uint16_t ddr_wdq[] = {32, 257};
52 /* Stop self refresh driven by MCU */
/*
 * clear_self_refresh - take the DRAM channel out of MCU-driven self refresh.
 *
 * Writes the DISR bits of the PMSTS register in the memory controller
 * (MEM_CTLR) via the message port.
 * NOTE(review): @mrc_params is not referenced on any line visible in this
 * excerpt; the function's closing lines are elided.
 */
53 void clear_self_refresh(struct mrc_params *mrc_params)
57 /* clear the PMSTS Channel Self Refresh bits */
58 mrc_write_mask(MEM_CTLR, PMSTS, PMSTS_DISR, PMSTS_DISR);
63 /* It will initialize timing registers in the MCU (DTR0..DTR4) */
/*
 * prog_ddr_timing_control - derive DRAM timing parameters and program the
 * memory controller timing registers DTR0..DTR4.
 *
 * Timings are computed from mrc_params->params (cl/ras/wtr/rrd/faw) and the
 * per-speed clock period table t_ck[], using MCEIL() to round picosecond
 * values up to whole DRAM clocks. Register fields are programmed as
 * "value - minimum" offsets (e.g. tCL-5, tRAS-14) per the DTR field encodings.
 *
 * NOTE(review): declarations/assignments of tck, tcl, wl, tmp1 and tmp2 are
 * on lines elided from this excerpt; tmp1/tmp2 are used in the DTR4 math
 * below without visible definitions — confirm against the full source.
 */
64 void prog_ddr_timing_control(struct mrc_params *mrc_params)
67 uint8_t trp, trcd, tras, twr, twtr, trrd, trtp, tfaw;
69 u32 dtr0, dtr1, dtr2, dtr3, dtr4;
75 mrc_post_code(0x02, 0x00);
/* Read-modify-write: capture current DTR register contents first */
77 dtr0 = msg_port_read(MEM_CTLR, DTR0);
78 dtr1 = msg_port_read(MEM_CTLR, DTR1);
79 dtr2 = msg_port_read(MEM_CTLR, DTR2);
80 dtr3 = msg_port_read(MEM_CTLR, DTR3);
81 dtr4 = msg_port_read(MEM_CTLR, DTR4);
83 tck = t_ck[mrc_params->ddr_speed]; /* Clock in picoseconds */
84 tcl = mrc_params->params.cl; /* CAS latency in clocks */
85 trp = tcl; /* Per CAT MRC */
86 trcd = tcl; /* Per CAT MRC */
87 tras = MCEIL(mrc_params->params.ras, tck);
89 /* Per JEDEC: tWR=15000ps DDR2/3 from 800-1600 */
90 twr = MCEIL(15000, tck);
92 twtr = MCEIL(mrc_params->params.wtr, tck);
93 trrd = MCEIL(mrc_params->params.rrd, tck);
94 trtp = 4; /* Valid for 800 and 1066, use 5 for 1333 */
95 tfaw = MCEIL(mrc_params->params.faw, tck);
/* Write latency: 5 clocks at 800, +1 per speed step */
97 wl = 5 + mrc_params->ddr_speed;
/* DTR0: frequency select, tCL, tRP, tRCD */
99 dtr0 &= ~DTR0_DFREQ_MASK;
100 dtr0 |= mrc_params->ddr_speed;
101 dtr0 &= ~DTR0_TCL_MASK;
103 dtr0 |= ((tcl - 5) << 12);
104 dtr0 &= ~DTR0_TRP_MASK;
105 dtr0 |= ((trp - 5) << 4); /* 5 bit DRAM Clock */
106 dtr0 &= ~DTR0_TRCD_MASK;
107 dtr0 |= ((trcd - 5) << 8); /* 5 bit DRAM Clock */
/* DTR1: write CAS latency, tWTP, tRTP, tRRD, tCMD, tRAS, tFAW, tCCD */
109 dtr1 &= ~DTR1_TWCL_MASK;
112 dtr1 &= ~DTR1_TWTP_MASK;
113 dtr1 |= ((wl + 4 + twr - 14) << 8); /* Change to tWTP */
114 dtr1 &= ~DTR1_TRTP_MASK;
115 dtr1 |= ((MMAX(trtp, 4) - 3) << 28); /* 4 bit DRAM Clock */
116 dtr1 &= ~DTR1_TRRD_MASK;
117 dtr1 |= ((trrd - 4) << 24); /* 4 bit DRAM Clock */
118 dtr1 &= ~DTR1_TCMD_MASK;
120 dtr1 &= ~DTR1_TRAS_MASK;
121 dtr1 |= ((tras - 14) << 20); /* 6 bit DRAM Clock */
122 dtr1 &= ~DTR1_TFAW_MASK;
123 dtr1 |= ((((tfaw + 1) >> 1) - 5) << 16);/* 4 bit DRAM Clock */
124 /* Set 4 Clock CAS to CAS delay (multi-burst) */
125 dtr1 &= ~DTR1_TCCD_MASK;
/* DTR2/DTR3: rank-to-rank and read/write turnaround delays */
127 dtr2 &= ~DTR2_TRRDR_MASK;
129 dtr2 &= ~DTR2_TWWDR_MASK;
131 dtr2 &= ~DTR2_TRWDR_MASK;
134 dtr3 &= ~DTR3_TWRDR_MASK;
136 dtr3 &= ~DTR3_TXXXX_MASK;
139 dtr3 &= ~DTR3_TRWSR_MASK;
140 if (mrc_params->ddr_speed == DDRFREQ_800) {
141 /* Extended RW delay (+1) */
142 dtr3 |= ((tcl - 5 + 1) << 8);
143 } else if (mrc_params->ddr_speed == DDRFREQ_1066) {
144 /* Extended RW delay (+1) */
145 dtr3 |= ((tcl - 5 + 1) << 8);
148 dtr3 &= ~DTR3_TWRSR_MASK;
149 dtr3 |= ((4 + wl + twtr - 11) << 13);
151 dtr3 &= ~DTR3_TXP_MASK;
152 if (mrc_params->ddr_speed == DDRFREQ_800)
153 dtr3 |= ((MMAX(0, 1 - 1)) << 22);
155 dtr3 |= ((MMAX(0, 2 - 1)) << 22);
/* DTR4: write ODT start/stop windows; keep ODT and target strobe enabled */
157 dtr4 &= ~DTR4_WRODTSTRT_MASK;
159 dtr4 &= ~DTR4_WRODTSTOP_MASK;
161 dtr4 &= ~DTR4_XXXX1_MASK;
162 dtr4 |= ((1 + tmp1 - tmp2 + 2) << 8);
163 dtr4 &= ~DTR4_XXXX2_MASK;
164 dtr4 |= ((1 + tmp1 - tmp2 + 2) << 12);
165 dtr4 &= ~(DTR4_ODTDIS | DTR4_TRGSTRDIS);
/* Commit all five timing registers back to the MCU */
167 msg_port_write(MEM_CTLR, DTR0, dtr0);
168 msg_port_write(MEM_CTLR, DTR1, dtr1);
169 msg_port_write(MEM_CTLR, DTR2, dtr2);
170 msg_port_write(MEM_CTLR, DTR3, dtr3);
171 msg_port_write(MEM_CTLR, DTR4, dtr4);
176 /* Configure MCU before jedec init sequence */
/*
 * prog_decode_before_jedec - quiesce the memory controller ahead of the
 * JEDEC DRAM init sequence: disable power saving, out-of-order scheduling,
 * auto refresh and ZQ calibration, then program a minimal rank population.
 *
 * NOTE(review): the declarations of dpmc0/dsch/drfc/dcal/drp and the
 * initial value of drp are on lines elided from this excerpt, as are the
 * statements executed for each rank_enables bit below.
 */
177 void prog_decode_before_jedec(struct mrc_params *mrc_params)
187 /* Disable power saving features */
188 dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
189 dpmc0 |= (DPMC0_CLKGTDIS | DPMC0_DISPWRDN);
190 dpmc0 &= ~DPMC0_PCLSTO_MASK;
191 dpmc0 &= ~DPMC0_DYNSREN;
192 msg_port_write(MEM_CTLR, DPMC0, dpmc0);
194 /* Disable out of order transactions */
195 dsch = msg_port_read(MEM_CTLR, DSCH);
196 dsch |= (DSCH_OOODIS | DSCH_NEWBYPDIS);
197 msg_port_write(MEM_CTLR, DSCH, dsch);
199 /* Disable issuing the REF command */
200 drfc = msg_port_read(MEM_CTLR, DRFC);
201 drfc &= ~DRFC_TREFI_MASK;
202 msg_port_write(MEM_CTLR, DRFC, drfc);
204 /* Disable ZQ calibration short */
205 dcal = msg_port_read(MEM_CTLR, DCAL);
206 dcal &= ~DCAL_ZQCINT_MASK;
207 dcal &= ~DCAL_SRXZQCL_MASK;
208 msg_port_write(MEM_CTLR, DCAL, dcal);
211 * Training performed in address mode 0, rank population has limited
212 * impact, however simulator complains if enabled non-existing rank.
215 if (mrc_params->rank_enables & 1)
217 if (mrc_params->rank_enables & 2)
219 msg_port_write(MEM_CTLR, DRP, drp);
225 * After Cold Reset, BIOS should set COLDWAKE bit to 1 before
226 * sending the WAKE message to the Dunit.
228 * For Standby Exit, or any other mode in which the DRAM is in
229 * SR, this bit must be set to 0.
/*
 * perform_ddr_reset - set COLDWAKE, wake the Dunit, then restore DRMC to
 * its default (ODTMODE only when rd_odt_value is 0).
 * NOTE(review): the actual wake-command call referenced by the comment
 * below is on a line elided from this excerpt.
 */
231 void perform_ddr_reset(struct mrc_params *mrc_params)
235 /* Set COLDWAKE bit before sending the WAKE message */
236 mrc_write_mask(MEM_CTLR, DRMC, DRMC_COLDWAKE, DRMC_COLDWAKE);
238 /* Send wake command to DUNIT (MUST be done before JEDEC) */
241 /* Set default value */
242 msg_port_write(MEM_CTLR, DRMC,
243 mrc_params->rd_odt_value == 0 ? DRMC_ODTMODE : 0);
250 * This function performs some initialization on the DDRIO unit.
251 * This function is dependent on BOARD_ID, DDR_SPEED, and CHANNEL_ENABLES.
/*
 * ddrphy_init - bring the DDR PHY (DDRIO) out of reset and program it.
 *
 * Visible phases, tracked via mrc_post_code(0x03, ...):
 *   0x00  deassert SPID_INIT_COMPLETE / IOBUFACT / WRPTRENABLE, PHY reset
 *   0x10  per-channel DQ / CMD / CLK-CTL / COMP programming, static delays,
 *         then global dither enables and PHY reset release
 *   0x11  enable VREGs
 *   0x12  enable MCDLLs
 *   0x13  TX/RXDLL setup
 *   0x14  host-to-memory clock alignment, initial RCOMP, re-assert IOBUFACT
 *         and SPID_INIT_COMPLETE
 *
 * NOTE(review): many lines of this function (loop headers, switch cases,
 * register operands, closing braces) are elided from this excerpt, so
 * several locals (temp, cas, cwl) appear without their declaring lines and
 * some register writes appear without their value arguments.
 */
253 void ddrphy_init(struct mrc_params *mrc_params)
256 uint8_t ch; /* channel counter */
257 uint8_t rk; /* rank counter */
258 uint8_t bl_grp; /* byte lane group counter (2 BLs per module) */
259 uint8_t bl_divisor = 1; /* byte lane divisor */
260 /* For DDR3 --> 0 == 800, 1 == 1066, 2 == 1333 */
261 uint8_t speed = mrc_params->ddr_speed & 3;
263 cas = mrc_params->params.cl;
264 cwl = 5 + mrc_params->ddr_speed;
270 /* ddrphy_init starts */
271 mrc_post_code(0x03, 0x00);
275 * Make sure IOBUFACT is deasserted before initializing the DDR PHY
278 * Make sure WRPTRENABLE is deasserted before initializing the DDR PHY
280 for (ch = 0; ch < NUM_CHANNELS; ch++) {
281 if (mrc_params->channel_enables & (1 << ch)) {
282 /* Deassert DDRPHY Initialization Complete */
283 mrc_alt_write_mask(DDRPHY,
284 CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
285 ~(1 << 20), 1 << 20); /* SPID_INIT_COMPLETE=0 */
286 /* Deassert IOBUFACT */
287 mrc_alt_write_mask(DDRPHY,
288 CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
289 ~(1 << 2), 1 << 2); /* IOBUFACTRST_N=0 */
291 mrc_alt_write_mask(DDRPHY,
292 CMDPTRREG + ch * DDRIOCCC_CH_OFFSET,
293 ~(1 << 0), 1 << 0); /* WRPTRENABLE=0 */
297 /* Put PHY in reset */
298 mrc_alt_write_mask(DDRPHY, MASTERRSTN, 0, 1);
300 /* Initialize DQ01, DQ23, CMD, CLK-CTL, COMP modules */
303 mrc_post_code(0x03, 0x10);
304 for (ch = 0; ch < NUM_CHANNELS; ch++) {
305 if (mrc_params->channel_enables & (1 << ch)) {
308 bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
310 /* Analog MUX select - IO2xCLKSEL */
311 mrc_alt_write_mask(DDRPHY,
313 bl_grp * DDRIODQ_BL_OFFSET +
314 ch * DDRIODQ_CH_OFFSET,
315 bl_grp ? 0 : (1 << 22), 1 << 22);
/* ODT strength selection by read ODT value; case bodies elided */
318 switch (mrc_params->rd_odt_value) {
334 mrc_alt_write_mask(DDRPHY,
336 bl_grp * DDRIODQ_BL_OFFSET +
337 ch * DDRIODQ_CH_OFFSET,
340 mrc_alt_write_mask(DDRPHY,
342 bl_grp * DDRIODQ_BL_OFFSET +
343 ch * DDRIODQ_CH_OFFSET,
346 /* Dynamic ODT/DIFFAMP */
347 temp = (cas << 24) | (cas << 16) |
348 (cas << 8) | (cas << 0);
364 /* Launch Time: ODT, DIFFAMP, ODT, DIFFAMP */
365 mrc_alt_write_mask(DDRPHY,
367 bl_grp * DDRIODQ_BL_OFFSET +
368 ch * DDRIODQ_CH_OFFSET,
/* On-duration values; the switch selecting among them is elided */
373 temp = (0x06 << 16) | (0x07 << 8);
376 temp = (0x07 << 16) | (0x08 << 8);
379 temp = (0x09 << 16) | (0x0a << 8);
382 temp = (0x0a << 16) | (0x0b << 8);
386 /* On Duration: ODT, DIFFAMP */
387 mrc_alt_write_mask(DDRPHY,
389 bl_grp * DDRIODQ_BL_OFFSET +
390 ch * DDRIODQ_CH_OFFSET,
392 /* On Duration: ODT, DIFFAMP */
393 mrc_alt_write_mask(DDRPHY,
395 bl_grp * DDRIODQ_BL_OFFSET +
396 ch * DDRIODQ_CH_OFFSET,
399 switch (mrc_params->rd_odt_value) {
401 /* override DIFFAMP=on, ODT=off */
402 temp = (0x3f << 16) | (0x3f << 10);
405 /* override DIFFAMP=on, ODT=on */
406 temp = (0x3f << 16) | (0x2a << 10);
410 /* Override: DIFFAMP, ODT */
411 mrc_alt_write_mask(DDRPHY,
413 bl_grp * DDRIODQ_BL_OFFSET +
414 ch * DDRIODQ_CH_OFFSET,
416 /* Override: DIFFAMP, ODT */
417 mrc_alt_write_mask(DDRPHY,
419 bl_grp * DDRIODQ_BL_OFFSET +
420 ch * DDRIODQ_CH_OFFSET,
425 /* 1xCLK Domain Timings: tEDP,RCVEN,WDQS (PO) */
426 mrc_alt_write_mask(DDRPHY,
428 bl_grp * DDRIODQ_BL_OFFSET +
429 ch * DDRIODQ_CH_OFFSET,
430 ((cas + 7) << 16) | ((cas - 4) << 8) |
431 ((cwl - 2) << 0), 0x003f1f1f);
432 mrc_alt_write_mask(DDRPHY,
434 bl_grp * DDRIODQ_BL_OFFSET +
435 ch * DDRIODQ_CH_OFFSET,
436 ((cas + 7) << 16) | ((cas - 4) << 8) |
437 ((cwl - 2) << 0), 0x003f1f1f);
439 /* RCVEN Bypass (PO) */
440 mrc_alt_write_mask(DDRPHY,
442 bl_grp * DDRIODQ_BL_OFFSET +
443 ch * DDRIODQ_CH_OFFSET,
445 mrc_alt_write_mask(DDRPHY,
447 bl_grp * DDRIODQ_BL_OFFSET +
448 ch * DDRIODQ_CH_OFFSET,
452 mrc_alt_write_mask(DDRPHY,
454 bl_grp * DDRIODQ_BL_OFFSET +
455 ch * DDRIODQ_CH_OFFSET,
457 mrc_alt_write_mask(DDRPHY,
459 bl_grp * DDRIODQ_BL_OFFSET +
460 ch * DDRIODQ_CH_OFFSET,
464 /* Internal Vref Code, Enable#, Ext_or_Int (1=Ext) */
465 mrc_alt_write_mask(DDRPHY,
467 bl_grp * DDRIODQ_BL_OFFSET +
468 ch * DDRIODQ_CH_OFFSET,
469 (0x03 << 2) | (0x0 << 1) | (0x0 << 0),
471 /* Internal Vref Code, Enable#, Ext_or_Int (1=Ext) */
472 mrc_alt_write_mask(DDRPHY,
474 bl_grp * DDRIODQ_BL_OFFSET +
475 ch * DDRIODQ_CH_OFFSET,
476 (0x03 << 2) | (0x0 << 1) | (0x0 << 0),
478 /* Per-Bit De-Skew Enable */
479 mrc_alt_write_mask(DDRPHY,
481 bl_grp * DDRIODQ_BL_OFFSET +
482 ch * DDRIODQ_CH_OFFSET,
484 /* Per-Bit De-Skew Enable */
485 mrc_alt_write_mask(DDRPHY,
487 bl_grp * DDRIODQ_BL_OFFSET +
488 ch * DDRIODQ_CH_OFFSET,
/* CMD module: CKE/bus buffers, RCOMP ODT, power-management delays */
493 mrc_alt_write_mask(DDRPHY,
494 CMDOBSCKEBBCTL + ch * DDRIOCCC_CH_OFFSET,
497 /* Enable tristate control of cmd/address bus */
498 mrc_alt_write_mask(DDRPHY,
499 CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
503 mrc_alt_write_mask(DDRPHY,
504 CMDRCOMPODT + ch * DDRIOCCC_CH_OFFSET,
505 (0x03 << 5) | (0x03 << 0), 0x3ff);
507 /* CMDPM* registers must be programmed in this order */
509 /* Turn On Delays: SFR (regulator), MPLL */
510 mrc_alt_write_mask(DDRPHY,
511 CMDPMDLYREG4 + ch * DDRIOCCC_CH_OFFSET,
512 0xffffffff, 0xffffffff);
514 * Delays: ASSERT_IOBUFACT_to_ALLON0_for_PM_MSG_3,
515 * VREG (MDLL) Turn On, ALLON0_to_DEASSERT_IOBUFACT
516 * for_PM_MSG_gt0, MDLL Turn On
518 mrc_alt_write_mask(DDRPHY,
519 CMDPMDLYREG3 + ch * DDRIOCCC_CH_OFFSET,
520 0xfffff616, 0xffffffff);
521 /* MPLL Divider Reset Delays */
522 mrc_alt_write_mask(DDRPHY,
523 CMDPMDLYREG2 + ch * DDRIOCCC_CH_OFFSET,
524 0xffffffff, 0xffffffff);
525 /* Turn Off Delays: VREG, Staggered MDLL, MDLL, PI */
526 mrc_alt_write_mask(DDRPHY,
527 CMDPMDLYREG1 + ch * DDRIOCCC_CH_OFFSET,
528 0xffffffff, 0xffffffff);
529 /* Turn On Delays: MPLL, Staggered MDLL, PI, IOBUFACT */
530 mrc_alt_write_mask(DDRPHY,
531 CMDPMDLYREG0 + ch * DDRIOCCC_CH_OFFSET,
532 0xffffffff, 0xffffffff);
533 /* Allow PUnit signals */
534 mrc_alt_write_mask(DDRPHY,
535 CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
536 (0x6 << 8) | (0x1 << 6) | (0x4 << 0),
538 /* DLL_VREG Bias Trim, VREF Tuning for DLL_VREG */
539 mrc_alt_write_mask(DDRPHY,
540 CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
541 (0x3 << 4) | (0x7 << 0), 0x7f);
/* CLK-CTL module */
544 mrc_alt_write_mask(DDRPHY,
545 CCOBSCKEBBCTL + ch * DDRIOCCC_CH_OFFSET,
546 0, 1 << 24); /* CLKEBB */
547 /* Buffer Enable: CS,CKE,ODT,CLK */
548 mrc_alt_write_mask(DDRPHY,
549 CCCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
552 mrc_alt_write_mask(DDRPHY,
553 CCRCOMPODT + ch * DDRIOCCC_CH_OFFSET,
554 (0x03 << 8) | (0x03 << 0), 0x00001f1f);
555 /* DLL_VREG Bias Trim, VREF Tuning for DLL_VREG */
556 mrc_alt_write_mask(DDRPHY,
557 CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
558 (0x3 << 4) | (0x7 << 0), 0x7f);
561 * COMP (RON channel specific)
562 * - DQ/DQS/DM RON: 32 Ohm
563 * - CTRL/CMD RON: 27 Ohm
566 /* RCOMP Vref PU/PD */
567 mrc_alt_write_mask(DDRPHY,
568 DQVREFCH0 + ch * DDRCOMP_CH_OFFSET,
569 (0x08 << 24) | (0x03 << 16), 0x3f3f0000);
570 /* RCOMP Vref PU/PD */
571 mrc_alt_write_mask(DDRPHY,
572 CMDVREFCH0 + ch * DDRCOMP_CH_OFFSET,
573 (0x0C << 24) | (0x03 << 16), 0x3f3f0000);
574 /* RCOMP Vref PU/PD */
575 mrc_alt_write_mask(DDRPHY,
576 CLKVREFCH0 + ch * DDRCOMP_CH_OFFSET,
577 (0x0F << 24) | (0x03 << 16), 0x3f3f0000);
578 /* RCOMP Vref PU/PD */
579 mrc_alt_write_mask(DDRPHY,
580 DQSVREFCH0 + ch * DDRCOMP_CH_OFFSET,
581 (0x08 << 24) | (0x03 << 16), 0x3f3f0000);
582 /* RCOMP Vref PU/PD */
583 mrc_alt_write_mask(DDRPHY,
584 CTLVREFCH0 + ch * DDRCOMP_CH_OFFSET,
585 (0x0C << 24) | (0x03 << 16), 0x3f3f0000);
587 /* DQS Swapped Input Enable */
588 mrc_alt_write_mask(DDRPHY,
589 COMPEN1CH0 + ch * DDRCOMP_CH_OFFSET,
590 (1 << 19) | (1 << 17), 0xc00ac000);
592 /* ODT VREF = 1.5 x 274/360+274 = 0.65V (code of ~50) */
594 mrc_alt_write_mask(DDRPHY,
595 DQVREFCH0 + ch * DDRCOMP_CH_OFFSET,
596 (0x32 << 8) | (0x03 << 0), 0x00003f3f);
598 mrc_alt_write_mask(DDRPHY,
599 DQSVREFCH0 + ch * DDRCOMP_CH_OFFSET,
600 (0x32 << 8) | (0x03 << 0), 0x00003f3f);
602 mrc_alt_write_mask(DDRPHY,
603 CLKVREFCH0 + ch * DDRCOMP_CH_OFFSET,
604 (0x0E << 8) | (0x05 << 0), 0x00003f3f);
607 * Slew rate settings are frequency specific,
608 * numbers below are for 800Mhz (speed == 0)
609 * - DQ/DQS/DM/CLK SR: 4V/ns,
610 * - CTRL/CMD SR: 1.5V/ns
612 temp = (0x0e << 16) | (0x0e << 12) | (0x08 << 8) |
613 (0x0b << 4) | (0x0b << 0);
614 /* DCOMP Delay Select: CTL,CMD,CLK,DQS,DQ */
615 mrc_alt_write_mask(DDRPHY,
616 DLYSELCH0 + ch * DDRCOMP_CH_OFFSET,
618 /* TCO Vref CLK,DQS,DQ */
619 mrc_alt_write_mask(DDRPHY,
620 TCOVREFCH0 + ch * DDRCOMP_CH_OFFSET,
621 (0x05 << 16) | (0x05 << 8) | (0x05 << 0),
623 /* ODTCOMP CMD/CTL PU/PD */
624 mrc_alt_write_mask(DDRPHY,
625 CCBUFODTCH0 + ch * DDRCOMP_CH_OFFSET,
626 (0x03 << 8) | (0x03 << 0),
629 mrc_alt_write_mask(DDRPHY,
630 COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
634 /* DQ COMP Overrides */
636 mrc_alt_write_mask(DDRPHY,
637 DQDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
638 (1 << 31) | (0x0a << 16),
641 mrc_alt_write_mask(DDRPHY,
642 DQDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
643 (1 << 31) | (0x0a << 16),
646 mrc_alt_write_mask(DDRPHY,
647 DQDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
648 (1 << 31) | (0x10 << 16),
651 mrc_alt_write_mask(DDRPHY,
652 DQDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
653 (1 << 31) | (0x10 << 16),
656 mrc_alt_write_mask(DDRPHY,
657 DQODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
658 (1 << 31) | (0x0b << 16),
661 mrc_alt_write_mask(DDRPHY,
662 DQODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
663 (1 << 31) | (0x0b << 16),
666 mrc_alt_write_mask(DDRPHY,
667 DQTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
670 mrc_alt_write_mask(DDRPHY,
671 DQTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
674 /* DQS COMP Overrides */
676 mrc_alt_write_mask(DDRPHY,
677 DQSDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
678 (1 << 31) | (0x0a << 16),
681 mrc_alt_write_mask(DDRPHY,
682 DQSDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
683 (1 << 31) | (0x0a << 16),
686 mrc_alt_write_mask(DDRPHY,
687 DQSDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
688 (1 << 31) | (0x10 << 16),
691 mrc_alt_write_mask(DDRPHY,
692 DQSDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
693 (1 << 31) | (0x10 << 16),
696 mrc_alt_write_mask(DDRPHY,
697 DQSODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
698 (1 << 31) | (0x0b << 16),
701 mrc_alt_write_mask(DDRPHY,
702 DQSODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
703 (1 << 31) | (0x0b << 16),
706 mrc_alt_write_mask(DDRPHY,
707 DQSTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
710 mrc_alt_write_mask(DDRPHY,
711 DQSTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
714 /* CLK COMP Overrides */
716 mrc_alt_write_mask(DDRPHY,
717 CLKDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
718 (1 << 31) | (0x0c << 16),
721 mrc_alt_write_mask(DDRPHY,
722 CLKDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
723 (1 << 31) | (0x0c << 16),
726 mrc_alt_write_mask(DDRPHY,
727 CLKDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
728 (1 << 31) | (0x07 << 16),
731 mrc_alt_write_mask(DDRPHY,
732 CLKDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
733 (1 << 31) | (0x07 << 16),
736 mrc_alt_write_mask(DDRPHY,
737 CLKODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
738 (1 << 31) | (0x0b << 16),
741 mrc_alt_write_mask(DDRPHY,
742 CLKODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
743 (1 << 31) | (0x0b << 16),
746 mrc_alt_write_mask(DDRPHY,
747 CLKTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
750 mrc_alt_write_mask(DDRPHY,
751 CLKTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
754 /* CMD COMP Overrides */
756 mrc_alt_write_mask(DDRPHY,
757 CMDDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
758 (1 << 31) | (0x0d << 16),
761 mrc_alt_write_mask(DDRPHY,
762 CMDDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
763 (1 << 31) | (0x0d << 16),
766 mrc_alt_write_mask(DDRPHY,
767 CMDDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
768 (1 << 31) | (0x0a << 16),
771 mrc_alt_write_mask(DDRPHY,
772 CMDDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
773 (1 << 31) | (0x0a << 16),
776 /* CTL COMP Overrides */
778 mrc_alt_write_mask(DDRPHY,
779 CTLDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
780 (1 << 31) | (0x0d << 16),
783 mrc_alt_write_mask(DDRPHY,
784 CTLDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
785 (1 << 31) | (0x0d << 16),
788 mrc_alt_write_mask(DDRPHY,
789 CTLDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
790 (1 << 31) | (0x0a << 16),
793 mrc_alt_write_mask(DDRPHY,
794 CTLDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
795 (1 << 31) | (0x0a << 16),
798 /* DQ TCOCOMP Overrides */
800 mrc_alt_write_mask(DDRPHY,
801 DQTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
802 (1 << 31) | (0x1f << 16),
805 mrc_alt_write_mask(DDRPHY,
806 DQTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
807 (1 << 31) | (0x1f << 16),
810 /* DQS TCOCOMP Overrides */
812 mrc_alt_write_mask(DDRPHY,
813 DQSTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
814 (1 << 31) | (0x1f << 16),
817 mrc_alt_write_mask(DDRPHY,
818 DQSTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
819 (1 << 31) | (0x1f << 16),
822 /* CLK TCOCOMP Overrides */
824 mrc_alt_write_mask(DDRPHY,
825 CLKTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
826 (1 << 31) | (0x1f << 16),
829 mrc_alt_write_mask(DDRPHY,
830 CLKTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
831 (1 << 31) | (0x1f << 16),
835 /* program STATIC delays */
837 set_wcmd(ch, ddr_wcmd[PLATFORM_ID]);
839 set_wcmd(ch, ddr_wclk[PLATFORM_ID] + HALF_CLK);
842 for (rk = 0; rk < NUM_RANKS; rk++) {
843 if (mrc_params->rank_enables & (1 << rk)) {
844 set_wclk(ch, rk, ddr_wclk[PLATFORM_ID]);
846 set_wctl(ch, rk, ddr_wctl[PLATFORM_ID]);
848 set_wctl(ch, rk, ddr_wclk[PLATFORM_ID] + HALF_CLK);
855 /* COMP (non channel specific) */
856 /* RCOMP: Dither PU Enable */
857 mrc_alt_write_mask(DDRPHY, DQANADRVPUCTL, 1 << 30, 1 << 30);
858 /* RCOMP: Dither PD Enable */
859 mrc_alt_write_mask(DDRPHY, DQANADRVPDCTL, 1 << 30, 1 << 30);
860 /* RCOMP: Dither PU Enable */
861 mrc_alt_write_mask(DDRPHY, CMDANADRVPUCTL, 1 << 30, 1 << 30);
862 /* RCOMP: Dither PD Enable */
863 mrc_alt_write_mask(DDRPHY, CMDANADRVPDCTL, 1 << 30, 1 << 30);
864 /* RCOMP: Dither PU Enable */
865 mrc_alt_write_mask(DDRPHY, CLKANADRVPUCTL, 1 << 30, 1 << 30);
866 /* RCOMP: Dither PD Enable */
867 mrc_alt_write_mask(DDRPHY, CLKANADRVPDCTL, 1 << 30, 1 << 30);
868 /* RCOMP: Dither PU Enable */
869 mrc_alt_write_mask(DDRPHY, DQSANADRVPUCTL, 1 << 30, 1 << 30);
870 /* RCOMP: Dither PD Enable */
871 mrc_alt_write_mask(DDRPHY, DQSANADRVPDCTL, 1 << 30, 1 << 30);
872 /* RCOMP: Dither PU Enable */
873 mrc_alt_write_mask(DDRPHY, CTLANADRVPUCTL, 1 << 30, 1 << 30);
874 /* RCOMP: Dither PD Enable */
875 mrc_alt_write_mask(DDRPHY, CTLANADRVPDCTL, 1 << 30, 1 << 30);
876 /* ODT: Dither PU Enable */
877 mrc_alt_write_mask(DDRPHY, DQANAODTPUCTL, 1 << 30, 1 << 30);
878 /* ODT: Dither PD Enable */
879 mrc_alt_write_mask(DDRPHY, DQANAODTPDCTL, 1 << 30, 1 << 30);
880 /* ODT: Dither PU Enable */
881 mrc_alt_write_mask(DDRPHY, CLKANAODTPUCTL, 1 << 30, 1 << 30);
882 /* ODT: Dither PD Enable */
883 mrc_alt_write_mask(DDRPHY, CLKANAODTPDCTL, 1 << 30, 1 << 30);
884 /* ODT: Dither PU Enable */
885 mrc_alt_write_mask(DDRPHY, DQSANAODTPUCTL, 1 << 30, 1 << 30);
886 /* ODT: Dither PD Enable */
887 mrc_alt_write_mask(DDRPHY, DQSANAODTPDCTL, 1 << 30, 1 << 30);
888 /* DCOMP: Dither PU Enable */
889 mrc_alt_write_mask(DDRPHY, DQANADLYPUCTL, 1 << 30, 1 << 30);
890 /* DCOMP: Dither PD Enable */
891 mrc_alt_write_mask(DDRPHY, DQANADLYPDCTL, 1 << 30, 1 << 30);
892 /* DCOMP: Dither PU Enable */
893 mrc_alt_write_mask(DDRPHY, CMDANADLYPUCTL, 1 << 30, 1 << 30);
894 /* DCOMP: Dither PD Enable */
895 mrc_alt_write_mask(DDRPHY, CMDANADLYPDCTL, 1 << 30, 1 << 30);
896 /* DCOMP: Dither PU Enable */
897 mrc_alt_write_mask(DDRPHY, CLKANADLYPUCTL, 1 << 30, 1 << 30);
898 /* DCOMP: Dither PD Enable */
899 mrc_alt_write_mask(DDRPHY, CLKANADLYPDCTL, 1 << 30, 1 << 30);
900 /* DCOMP: Dither PU Enable */
901 mrc_alt_write_mask(DDRPHY, DQSANADLYPUCTL, 1 << 30, 1 << 30);
902 /* DCOMP: Dither PD Enable */
903 mrc_alt_write_mask(DDRPHY, DQSANADLYPDCTL, 1 << 30, 1 << 30);
904 /* DCOMP: Dither PU Enable */
905 mrc_alt_write_mask(DDRPHY, CTLANADLYPUCTL, 1 << 30, 1 << 30);
906 /* DCOMP: Dither PD Enable */
907 mrc_alt_write_mask(DDRPHY, CTLANADLYPDCTL, 1 << 30, 1 << 30);
908 /* TCO: Dither PU Enable */
909 mrc_alt_write_mask(DDRPHY, DQANATCOPUCTL, 1 << 30, 1 << 30);
910 /* TCO: Dither PD Enable */
911 mrc_alt_write_mask(DDRPHY, DQANATCOPDCTL, 1 << 30, 1 << 30);
912 /* TCO: Dither PU Enable */
913 mrc_alt_write_mask(DDRPHY, CLKANATCOPUCTL, 1 << 30, 1 << 30);
914 /* TCO: Dither PD Enable */
915 mrc_alt_write_mask(DDRPHY, CLKANATCOPDCTL, 1 << 30, 1 << 30);
916 /* TCO: Dither PU Enable */
917 mrc_alt_write_mask(DDRPHY, DQSANATCOPUCTL, 1 << 30, 1 << 30);
918 /* TCO: Dither PD Enable */
919 mrc_alt_write_mask(DDRPHY, DQSANATCOPDCTL, 1 << 30, 1 << 30);
920 /* TCOCOMP: Pulse Count */
921 mrc_alt_write_mask(DDRPHY, TCOCNTCTRL, 1, 3);
922 /* ODT: CMD/CTL PD/PU */
923 mrc_alt_write_mask(DDRPHY, CHNLBUFSTATIC,
924 (0x03 << 24) | (0x03 << 16), 0x1f1f0000);
925 /* Set 1us counter */
926 mrc_alt_write_mask(DDRPHY, MSCNTR, 0x64, 0xff);
927 mrc_alt_write_mask(DDRPHY, LATCH1CTL, 0x1 << 28, 0x70000000);
929 /* Release PHY from reset */
930 mrc_alt_write_mask(DDRPHY, MASTERRSTN, 1, 1);
/* Phase 0x11: enable the voltage regulators (VREG) per module */
933 mrc_post_code(0x03, 0x11);
935 for (ch = 0; ch < NUM_CHANNELS; ch++) {
936 if (mrc_params->channel_enables & (1 << ch)) {
939 bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
941 mrc_alt_write_mask(DDRPHY,
943 bl_grp * DDRIODQ_BL_OFFSET +
944 ch * DDRIODQ_CH_OFFSET,
946 1 << 13); /* Enable VREG */
951 mrc_alt_write_mask(DDRPHY, ECCMDLLCTL,
952 1 << 13, 1 << 13); /* Enable VREG */
955 mrc_alt_write_mask(DDRPHY,
956 CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
957 1 << 13, 1 << 13); /* Enable VREG */
960 mrc_alt_write_mask(DDRPHY,
961 CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
962 1 << 13, 1 << 13); /* Enable VREG */
/* Phase 0x12: enable the master DLLs (MCDLL) per module */
968 mrc_post_code(0x03, 0x12);
971 for (ch = 0; ch < NUM_CHANNELS; ch++) {
972 if (mrc_params->channel_enables & (1 << ch)) {
975 bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
977 mrc_alt_write_mask(DDRPHY,
979 bl_grp * DDRIODQ_BL_OFFSET +
980 ch * DDRIODQ_CH_OFFSET,
982 1 << 17); /* Enable MCDLL */
987 mrc_alt_write_mask(DDRPHY, ECCMDLLCTL,
988 1 << 17, 1 << 17); /* Enable MCDLL */
991 mrc_alt_write_mask(DDRPHY,
992 CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
993 1 << 18, 1 << 18); /* Enable MCDLL */
996 mrc_alt_write_mask(DDRPHY,
997 CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
998 1 << 18, 1 << 18); /* Enable MCDLL */
/* Phase 0x13: TX/RXDLL setup */
1004 mrc_post_code(0x03, 0x13);
1007 for (ch = 0; ch < NUM_CHANNELS; ch++) {
1008 if (mrc_params->channel_enables & (1 << ch)) {
1011 bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
1013 #ifdef FORCE_16BIT_DDRIO
1015 (mrc_params->channel_width == X16)) ?
1021 mrc_alt_write_mask(DDRPHY,
1023 bl_grp * DDRIODQ_BL_OFFSET +
1024 ch * DDRIODQ_CH_OFFSET,
1028 mrc_alt_write_mask(DDRPHY,
1030 bl_grp * DDRIODQ_BL_OFFSET +
1031 ch * DDRIODQ_CH_OFFSET,
1034 /* Enable RXDLL Overrides BL0 */
1035 mrc_alt_write_mask(DDRPHY,
1037 bl_grp * DDRIODQ_BL_OFFSET +
1038 ch * DDRIODQ_CH_OFFSET,
1044 mrc_alt_write_mask(DDRPHY, ECCDLLTXCTL,
1049 mrc_alt_write_mask(DDRPHY,
1050 CMDDLLTXCTL + ch * DDRIOCCC_CH_OFFSET,
/* Phase 0x14: host-to-memory clock alignment, RCOMP, IOBUFACT release */
1057 mrc_post_code(0x03, 0x14);
1059 for (ch = 0; ch < NUM_CHANNELS; ch++) {
1060 if (mrc_params->channel_enables & (1 << ch)) {
1061 /* Host To Memory Clock Alignment (HMC) for 800/1066 */
1063 bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
1065 /* CLK_ALIGN_MOD_ID */
1066 mrc_alt_write_mask(DDRPHY,
1068 bl_grp * DDRIODQ_BL_OFFSET +
1069 ch * DDRIODQ_CH_OFFSET,
1074 mrc_alt_write_mask(DDRPHY,
1075 ECCCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
1077 mrc_alt_write_mask(DDRPHY,
1078 CMDCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
1080 mrc_alt_write_mask(DDRPHY,
1081 CCCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
1083 mrc_alt_write_mask(DDRPHY,
1084 CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET,
1087 * NUM_SAMPLES, MAX_SAMPLES,
1088 * MACRO_PI_STEP, MICRO_PI_STEP
1090 mrc_alt_write_mask(DDRPHY,
1091 CMDCLKALIGNREG1 + ch * DDRIOCCC_CH_OFFSET,
1092 (0x18 << 16) | (0x10 << 8) |
1093 (0x8 << 2) | (0x1 << 0),
1095 /* TOTAL_NUM_MODULES, FIRST_U_PARTITION */
1096 mrc_alt_write_mask(DDRPHY,
1097 CMDCLKALIGNREG2 + ch * DDRIOCCC_CH_OFFSET,
1098 (0x10 << 16) | (0x4 << 8) | (0x2 << 4),
1101 /* START_CLK_ALIGN=1 */
1102 mrc_alt_write_mask(DDRPHY,
1103 CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET,
/* Busy-wait for the hardware clock-align state machine to finish */
1105 while (msg_port_alt_read(DDRPHY,
1106 CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET) &
1108 ; /* wait for START_CLK_ALIGN=0 */
1111 /* Set RD/WR Pointer Separation & COUNTEN & FIFOPTREN */
1112 mrc_alt_write_mask(DDRPHY,
1113 CMDPTRREG + ch * DDRIOCCC_CH_OFFSET,
1114 1, 1); /* WRPTRENABLE=1 */
1117 /* enable bypass for CLK buffer (PO) */
1118 mrc_alt_write_mask(DDRPHY,
1119 COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
1121 /* Initial COMP Enable */
1122 mrc_alt_write_mask(DDRPHY, CMPCTRL, 1, 1);
1123 /* wait for Initial COMP Enable = 0 */
1124 while (msg_port_alt_read(DDRPHY, CMPCTRL) & 1)
1126 /* disable bypass for CLK buffer (PO) */
1127 mrc_alt_write_mask(DDRPHY,
1128 COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
1134 mrc_alt_write_mask(DDRPHY,
1135 CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
1136 1 << 2, 1 << 2); /* IOBUFACTRST_N=1 */
1138 /* DDRPHY initialization complete */
1139 mrc_alt_write_mask(DDRPHY,
1140 CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
1141 1 << 20, 1 << 20); /* SPID_INIT_COMPLETE=1 */
1148 /* This function performs JEDEC initialization on all enabled channels */
/*
 * perform_jedec_init - run the JEDEC DDR3 power-up/init sequence: toggle
 * DDR3 RESET#, wake the ranks with NOPs, build the MR2/MR3/MR1/MR0 mode
 * register command words, then issue them to every populated rank,
 * finishing with a ZQCL calibration command per rank.
 *
 * NOTE(review): the zero-initializations of emrs2_cmd/emrs3_cmd/emrs1_cmd/
 * mrs0_cmd, the declarations of dtr0/drp/drmc/tck, the 200us delay call,
 * and several per-rank statements are on lines elided from this excerpt.
 */
1149 void perform_jedec_init(struct mrc_params *mrc_params)
1151 uint8_t twr, wl, rank;
1163 /* jedec_init starts */
1164 mrc_post_code(0x04, 0x00);
1166 /* DDR3_RESET_SET=0, DDR3_RESET_RESET=1 */
1167 mrc_alt_write_mask(DDRPHY, CCDDR3RESETCTL, 2, 0x102);
1169 /* Assert RESET# for 200us */
1172 /* DDR3_RESET_SET=1, DDR3_RESET_RESET=0 */
1173 mrc_alt_write_mask(DDRPHY, CCDDR3RESETCTL, 0x100, 0x102);
1175 dtr0 = msg_port_read(MEM_CTLR, DTR0);
1178 * Set CKEVAL for populated ranks
1179 * then send NOP to each rank (#4550197)
1182 drp = msg_port_read(MEM_CTLR, DRP);
1185 drmc = msg_port_read(MEM_CTLR, DRMC);
1187 drmc |= (DRMC_CKEMODE | drp);
1189 msg_port_write(MEM_CTLR, DRMC, drmc);
1191 for (rank = 0; rank < NUM_RANKS; rank++) {
1192 /* Skip to next populated rank */
1193 if ((mrc_params->rank_enables & (1 << rank)) == 0)
1196 dram_init_command(DCMD_NOP(rank));
/* Restore DRMC: ODTMODE only when read ODT is disabled */
1199 msg_port_write(MEM_CTLR, DRMC,
1200 (mrc_params->rd_odt_value == 0 ? DRMC_ODTMODE : 0));
/* MR2 (EMRS2) field layout */
1204 * BIT[15:11] --> Always "0"
1205 * BIT[10:09] --> Rtt_WR: want "Dynamic ODT Off" (0)
1206 * BIT[08] --> Always "0"
1207 * BIT[07] --> SRT: use sr_temp_range
1208 * BIT[06] --> ASR: want "Manual SR Reference" (0)
1209 * BIT[05:03] --> CWL: use oem_tCWL
1210 * BIT[02:00] --> PASR: want "Full Array" (0)
1212 emrs2_cmd |= (2 << 3);
1213 wl = 5 + mrc_params->ddr_speed;
1214 emrs2_cmd |= ((wl - 5) << 9);
1215 emrs2_cmd |= (mrc_params->sr_temp_range << 13);
/* MR3 (EMRS3) field layout */
1219 * BIT[15:03] --> Always "0"
1220 * BIT[02] --> MPR: want "Normal Operation" (0)
1221 * BIT[01:00] --> MPR_Loc: want "Predefined Pattern" (0)
1223 emrs3_cmd |= (3 << 3);
/* MR1 (EMRS1) field layout */
1227 * BIT[15:13] --> Always "0"
1228 * BIT[12:12] --> Qoff: want "Output Buffer Enabled" (0)
1229 * BIT[11:11] --> TDQS: want "Disabled" (0)
1230 * BIT[10:10] --> Always "0"
1231 * BIT[09,06,02] --> Rtt_nom: use rtt_nom_value
1232 * BIT[08] --> Always "0"
1233 * BIT[07] --> WR_LVL: want "Disabled" (0)
1234 * BIT[05,01] --> DIC: use ron_value
1235 * BIT[04:03] --> AL: additive latency want "0" (0)
1236 * BIT[00] --> DLL: want "Enable" (0)
1238 * (BIT5|BIT1) set Ron value
1239 * 00 --> RZQ/6 (40ohm)
1240 * 01 --> RZQ/7 (34ohm)
1243 * (BIT9|BIT6|BIT2) set Rtt_nom value
1245 * 001 --> RZQ/4 ( 60ohm)
1246 * 010 --> RZQ/2 (120ohm)
1247 * 011 --> RZQ/6 ( 40ohm)
1250 emrs1_cmd |= (1 << 3);
1251 emrs1_cmd &= ~(1 << 6);
1253 if (mrc_params->ron_value == 0)
1254 emrs1_cmd |= (1 << 7);
1256 emrs1_cmd &= ~(1 << 7);
1258 if (mrc_params->rtt_nom_value == 0)
1259 emrs1_cmd |= (DDR3_EMRS1_RTTNOM_40 << 6);
1260 else if (mrc_params->rtt_nom_value == 1)
1261 emrs1_cmd |= (DDR3_EMRS1_RTTNOM_60 << 6);
1262 else if (mrc_params->rtt_nom_value == 2)
1263 emrs1_cmd |= (DDR3_EMRS1_RTTNOM_120 << 6);
1265 /* save MRS1 value (excluding control fields) */
1266 mrc_params->mrs1 = emrs1_cmd >> 6;
/* MR0 (MRS0) field layout */
1270 * BIT[15:13] --> Always "0"
1271 * BIT[12] --> PPD: for Quark (1)
1272 * BIT[11:09] --> WR: use oem_tWR
1273 * BIT[08] --> DLL: want "Reset" (1, self clearing)
1274 * BIT[07] --> MODE: want "Normal" (0)
1275 * BIT[06:04,02] --> CL: use oem_tCAS
1276 * BIT[03] --> RD_BURST_TYPE: want "Interleave" (1)
1277 * BIT[01:00] --> BL: want "8 Fixed" (0)
1288 * BIT[02:02] "0" if oem_tCAS <= 11 (1866?)
1289 * BIT[06:04] use oem_tCAS-4
1291 mrs0_cmd |= (1 << 14);
1292 mrs0_cmd |= (1 << 18);
/* CAS latency is recovered from the tCL field programmed into DTR0 */
1293 mrs0_cmd |= ((((dtr0 >> 12) & 7) + 1) << 10);
1295 tck = t_ck[mrc_params->ddr_speed];
1296 /* Per JEDEC: tWR=15000ps DDR2/3 from 800-1600 */
1297 twr = MCEIL(15000, tck);
1298 mrs0_cmd |= ((twr - 4) << 15);
/* Issue MR2, MR3, MR1, MR0 then ZQCL to each populated rank */
1300 for (rank = 0; rank < NUM_RANKS; rank++) {
1301 /* Skip to next populated rank */
1302 if ((mrc_params->rank_enables & (1 << rank)) == 0)
1305 emrs2_cmd |= (rank << 22);
1306 dram_init_command(emrs2_cmd);
1308 emrs3_cmd |= (rank << 22);
1309 dram_init_command(emrs3_cmd);
1311 emrs1_cmd |= (rank << 22);
1312 dram_init_command(emrs1_cmd);
1314 mrs0_cmd |= (rank << 22);
1315 dram_init_command(mrs0_cmd);
1317 dram_init_command(DCMD_ZQCL(rank));
1324 * Dunit Initialization Complete
1326 * Indicates that initialization of the Dunit has completed.
1328 * Memory accesses are permitted and maintenance operation begins.
1329 * Until this bit is set to a 1, the memory controller will not accept
1330 * DRAM requests from the MEMORY_MANAGER or HTE.
/*
 * set_ddr_init_complete - read-modify-write DCO to mark Dunit init done.
 * NOTE(review): the statement that actually sets the init-complete bit in
 * dco between the read and the write is on a line elided from this excerpt.
 */
1332 void set_ddr_init_complete(struct mrc_params *mrc_params)
1338 dco = msg_port_read(MEM_CTLR, DCO);
1341 msg_port_write(MEM_CTLR, DCO, dco);
1347 * This function will restore the previously saved timing data.
1349 * The saved data is used on subsequent boots to speed up boot times
1350 * and is required for Suspend To RAM capabilities.
/*
 * restore_timings - reapply saved per-channel/rank/byte-lane delay and
 * vref settings from mrc_params->timings via the set_* helpers.
 * NOTE(review): the declarations of ch/rk/bl and the loop closing braces
 * are on lines elided from this excerpt.
 */
1352 void restore_timings(struct mrc_params *mrc_params)
1355 const struct mrc_timings *mt = &mrc_params->timings;
1357 for (ch = 0; ch < NUM_CHANNELS; ch++) {
1358 for (rk = 0; rk < NUM_RANKS; rk++) {
1359 for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
1360 set_rcvn(ch, rk, bl, mt->rcvn[ch][rk][bl]);
1361 set_rdqs(ch, rk, bl, mt->rdqs[ch][rk][bl]);
1362 set_wdqs(ch, rk, bl, mt->wdqs[ch][rk][bl]);
1363 set_wdq(ch, rk, bl, mt->wdq[ch][rk][bl]);
1365 /* VREF (RANK0 only) */
1366 set_vref(ch, bl, mt->vref[ch][bl]);
1369 set_wctl(ch, rk, mt->wctl[ch][rk]);
1371 set_wcmd(ch, mt->wcmd[ch]);
1376 * Configure default settings normally set as part of read training
1378 * Some defaults have to be set earlier as they may affect earlier
/*
 * default_timings - program the pre-training defaults: RDQS delay 24 for
 * every channel/rank/byte lane, and VREF code 32 per byte lane (rank 0).
 * NOTE(review): the declarations of ch/rk/bl and closing braces are on
 * lines elided from this excerpt.
 */
1381 void default_timings(struct mrc_params *mrc_params)
1385 for (ch = 0; ch < NUM_CHANNELS; ch++) {
1386 for (rk = 0; rk < NUM_RANKS; rk++) {
1387 for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
1388 set_rdqs(ch, rk, bl, 24);
1390 /* VREF (RANK0 only) */
1391 set_vref(ch, bl, 32);
1399 * This function will perform our RCVEN Calibration Algorithm.
1400 * We will only use the 2xCLK domain timings to perform RCVEN Calibration.
1401 * All byte lanes will be calibrated "simultaneously" per channel per rank.
1403 void rcvn_cal(struct mrc_params *mrc_params)
1405 uint8_t ch; /* channel counter */
1406 uint8_t rk; /* rank counter */
1407 uint8_t bl; /* byte lane counter */
/* x16 parts expose half the byte lanes of x8 parts */
1408 uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
1411 /* used to find placement for rank2rank sharing configs */
1412 uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
1414 /* used to find placement for rank2rank sharing configs */
1415 uint32_t num_ranks_enabled = 0;
1422 /* absolute PI value to be programmed on the byte lane */
1423 uint32_t delay[NUM_BYTE_LANES];
1424 u32 dtr1, dtr1_save;
1429 /* rcvn_cal starts */
1430 mrc_post_code(0x05, 0x00);
1433 /* need separate burst to sample DQS preamble */
1434 dtr1 = msg_port_read(MEM_CTLR, DTR1);
/*
 * NOTE(review): dtr1_save is presumably captured from dtr1 on a line
 * elided from this view before TCCD is widened - confirm; it is
 * written back at the end of this function.
 */
1436 dtr1 |= DTR1_TCCD_12CLK;
1437 msg_port_write(MEM_CTLR, DTR1, dtr1);
1441 /* need to set "final_delay[][]" elements to "0" */
1442 memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
1445 /* loop through each enabled channel */
1446 for (ch = 0; ch < NUM_CHANNELS; ch++) {
1447 if (mrc_params->channel_enables & (1 << ch)) {
1448 /* perform RCVEN Calibration on a per rank basis */
1449 for (rk = 0; rk < NUM_RANKS; rk++) {
1450 if (mrc_params->rank_enables & (1 << rk)) {
1452 * POST_CODE here indicates the current
1453 * channel and rank being calibrated
1455 mrc_post_code(0x05, 0x10 + ((ch << 4) | rk));
1458 /* set hard-coded timing values */
1459 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++)
1460 set_rcvn(ch, rk, bl, ddr_rcvn[PLATFORM_ID]);
1462 /* enable FIFORST */
1463 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl += 2) {
1464 mrc_alt_write_mask(DDRPHY,
1466 (bl >> 1) * DDRIODQ_BL_OFFSET +
1467 ch * DDRIODQ_CH_OFFSET,
1470 /* initialize the starting delay to 128 PI (cas +1 CLK) */
1471 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
1472 /* 1x CLK domain timing is cas-4 */
1473 delay[bl] = (4 + 1) * FULL_CLK;
1475 set_rcvn(ch, rk, bl, delay[bl]);
1478 /* now find the rising edge */
1479 find_rising_edge(mrc_params, delay, ch, rk, true);
1481 /* Now increase delay by 32 PI (1/4 CLK) to place in center of high pulse */
1482 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
1483 delay[bl] += QRTR_CLK;
1484 set_rcvn(ch, rk, bl, delay[bl]);
1486 /* Now decrement delay by 128 PI (1 CLK) until we sample a "0" */
/* NOTE(review): temp is declared on a line elided from this view */
1488 temp = sample_dqs(mrc_params, ch, rk, true);
1489 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
1490 if (temp & (1 << bl)) {
1491 if (delay[bl] >= FULL_CLK) {
1492 delay[bl] -= FULL_CLK;
1493 set_rcvn(ch, rk, bl, delay[bl]);
1495 /* not enough delay */
1496 training_message(ch, rk, bl);
1497 mrc_post_code(0xee, 0x50);
/* repeat until every sampled byte lane reads "0" */
1501 } while (temp & 0xff);
1504 /* increment "num_ranks_enabled" */
1505 num_ranks_enabled++;
1506 /* Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble */
/* rank2rank-sharing placement: program the rolling average over ranks */
1507 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
1508 delay[bl] += QRTR_CLK;
1509 /* add "delay[]" values to "final_delay[][]" for rolling average */
1510 final_delay[ch][bl] += delay[bl];
1511 /* set timing based on rolling average values */
1512 set_rcvn(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
1515 /* Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble */
/* non-sharing placement: program this rank's own delay directly */
1516 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
1517 delay[bl] += QRTR_CLK;
1518 set_rcvn(ch, rk, bl, delay[bl]);
1522 /* disable FIFORST */
1523 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl += 2) {
1524 mrc_alt_write_mask(DDRPHY,
1526 (bl >> 1) * DDRIODQ_BL_OFFSET +
1527 ch * DDRIODQ_CH_OFFSET,
1537 /* restore original */
1538 msg_port_write(MEM_CTLR, DTR1, dtr1_save);
1545 * This function will perform the Write Levelling algorithm
1546 * (align WCLK and WDQS).
1548 * This algorithm will act on each rank in each channel separately.
1550 void wr_level(struct mrc_params *mrc_params)
1552 uint8_t ch; /* channel counter */
1553 uint8_t rk; /* rank counter */
1554 uint8_t bl; /* byte lane counter */
/* x16 parts expose half the byte lanes of x8 parts */
1555 uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
1558 /* used to find placement for rank2rank sharing configs */
1559 uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
1561 /* used to find placement for rank2rank sharing configs */
1562 uint32_t num_ranks_enabled = 0;
1568 /* determines stop condition for CRS_WR_LVL */
1569 bool all_edges_found;
1570 /* absolute PI value to be programmed on the byte lane */
1571 uint32_t delay[NUM_BYTE_LANES];
1573 * static makes it so the data is loaded in the heap once by shadow(),
1574 * where non-static copies the data onto the stack every time this
1575 * function is called
1577 uint32_t address; /* address to be checked during COARSE_WR_LVL */
1578 u32 dtr4, dtr4_save;
1583 /* wr_level starts */
1584 mrc_post_code(0x06, 0x00);
1587 /* need to set "final_delay[][]" elements to "0" */
1588 memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
1591 /* loop through each enabled channel */
1592 for (ch = 0; ch < NUM_CHANNELS; ch++) {
1593 if (mrc_params->channel_enables & (1 << ch)) {
1594 /* perform WRITE LEVELING algorithm on a per rank basis */
1595 for (rk = 0; rk < NUM_RANKS; rk++) {
1596 if (mrc_params->rank_enables & (1 << rk)) {
1598 * POST_CODE here indicates the current
1599 * rank and channel being calibrated
1601 mrc_post_code(0x06, 0x10 + ((ch << 4) | rk));
/* seed WDQS/WDQ with platform defaults before leveling */
1604 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
1605 set_wdqs(ch, rk, bl, ddr_wdqs[PLATFORM_ID]);
1606 set_wdq(ch, rk, bl, ddr_wdqs[PLATFORM_ID] - QRTR_CLK);
1610 * perform a single PRECHARGE_ALL command to
1611 * make DRAM state machine go to IDLE state
1613 dram_init_command(DCMD_PREA(rk));
1616 * enable Write Levelling Mode
1617 * (EMRS1 w/ Write Levelling Mode Enable)
1619 dram_init_command(DCMD_MRS1(rk, 0x82));
1622 * set ODT DRAM Full Time Termination
/*
 * NOTE(review): dtr4_save is presumably captured from dtr4 on a line
 * elided from this view - confirm; it is restored below at line 1675.
 */
1626 dtr4 = msg_port_read(MEM_CTLR, DTR4);
1628 dtr4 |= DTR4_ODTDIS;
1629 msg_port_write(MEM_CTLR, DTR4, dtr4);
1631 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
1633 * Enable Sandy Bridge Mode (WDQ Tri-State) &
1634 * Ensure 5 WDQS pulses during Write Leveling
1636 mrc_alt_write_mask(DDRPHY,
1637 DQCTL + DDRIODQ_BL_OFFSET * bl + DDRIODQ_CH_OFFSET * ch,
1642 /* Write Leveling Mode enabled in IO */
1643 mrc_alt_write_mask(DDRPHY,
1644 CCDDR3RESETCTL + DDRIOCCC_CH_OFFSET * ch,
1647 /* Initialize the starting delay to WCLK */
1648 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
1653 delay[bl] = get_wclk(ch, rk);
1655 set_wdqs(ch, rk, bl, delay[bl]);
1658 /* now find the rising edge */
1659 find_rising_edge(mrc_params, delay, ch, rk, false);
1661 /* disable Write Levelling Mode */
1662 mrc_alt_write_mask(DDRPHY,
1663 CCDDR3RESETCTL + DDRIOCCC_CH_OFFSET * ch,
1666 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
1667 /* Disable Sandy Bridge Mode & Ensure 4 WDQS pulses during normal operation */
1668 mrc_alt_write_mask(DDRPHY,
1669 DQCTL + DDRIODQ_BL_OFFSET * bl + DDRIODQ_CH_OFFSET * ch,
1674 /* restore original DTR4 */
1675 msg_port_write(MEM_CTLR, DTR4, dtr4_save);
1678 * restore original value
1679 * (Write Levelling Mode Disable)
1681 dram_init_command(DCMD_MRS1(rk, mrc_params->mrs1));
1684 * perform a single PRECHARGE_ALL command to
1685 * make DRAM state machine go to IDLE state
1687 dram_init_command(DCMD_PREA(rk));
1689 mrc_post_code(0x06, 0x30 + ((ch << 4) | rk));
1692 * COARSE WRITE LEVEL:
1693 * check that we're on the correct clock edge
1696 /* hte reconfiguration request */
1697 mrc_params->hte_setup = 1;
1699 /* start CRS_WR_LVL with WDQS = WDQS + 128 PI */
1700 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
1701 delay[bl] = get_wdqs(ch, rk, bl) + FULL_CLK;
1702 set_wdqs(ch, rk, bl, delay[bl]);
1704 * program WDQ timings based on WDQS
1705 * (WDQ = WDQS - 32 PI)
1707 set_wdq(ch, rk, bl, (delay[bl] - QRTR_CLK));
1710 /* get an address in the targeted channel/rank */
1711 address = get_addr(ch, rk);
1713 uint32_t coarse_result = 0x00;
1714 uint32_t coarse_result_mask = byte_lane_mask(mrc_params);
/* assume success until a byte lane fails the coarse check */
1716 all_edges_found = true;
1718 mrc_params->hte_setup = 1;
1719 coarse_result = check_rw_coarse(mrc_params, address);
1721 /* check for failures and margin the byte lane back 128 PI (1 CLK) */
1722 for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
1723 if (coarse_result & (coarse_result_mask << bl)) {
1724 all_edges_found = false;
1725 delay[bl] -= FULL_CLK;
1726 set_wdqs(ch, rk, bl, delay[bl]);
1727 /* program WDQ timings based on WDQS (WDQ = WDQS - 32 PI) */
1728 set_wdq(ch, rk, bl, delay[bl] - QRTR_CLK);
1731 } while (!all_edges_found);
1734 /* increment "num_ranks_enabled" */
1735 num_ranks_enabled++;
1736 /* accumulate "final_delay[][]" values from "delay[]" values for rolling average */
1737 for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
1738 final_delay[ch][bl] += delay[bl];
1739 set_wdqs(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
1740 /* program WDQ timings based on WDQS (WDQ = WDQS - 32 PI) */
1741 set_wdq(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled - QRTR_CLK);
/* Configure page-close timeout and precharge power-down in DPMC0 */
1753 void prog_page_ctrl(struct mrc_params *mrc_params)
1759 dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
/* clear page-close timeout field; new value set on an elided line */
1760 dpmc0 &= ~DPMC0_PCLSTO_MASK;
/* enable precharge-all power-down */
1762 dpmc0 |= DPMC0_PREAPWDEN;
1763 msg_port_write(MEM_CTLR, DPMC0, dpmc0);
1767 * This function will perform the READ TRAINING Algorithm on all
1768 * channels/ranks/byte_lanes simultaneously to minimize execution time.
1770 * The idea here is to train the VREF and RDQS (and eventually RDQ) values
1771 * to achieve maximum READ margins. The algorithm will first determine the
1772 * X coordinate (RDQS setting). This is done by collapsing the VREF eye
1773 * until we find a minimum required RDQS eye for VREF_MIN and VREF_MAX.
1774 * Then we take the averages of the RDQS eye at VREF_MIN and VREF_MAX,
1775 * then average those; this will be the final X coordinate. The algorithm
1776 * will then determine the Y coordinate (VREF setting). This is done by
1777 * collapsing the RDQS eye until we find a minimum required VREF eye for
1778 * RDQS_MIN and RDQS_MAX. Then we take the averages of the VREF eye at
1779 * RDQS_MIN and RDQS_MAX, then average those; this will be the final Y
1782 * NOTE: this algorithm assumes the eye curves have a one-to-one relationship,
1783 * meaning for each X the curve has only one Y and vice versa.
1785 void rd_train(struct mrc_params *mrc_params)
1787 uint8_t ch; /* channel counter */
1788 uint8_t rk; /* rank counter */
1789 uint8_t bl; /* byte lane counter */
/* x16 parts expose half the byte lanes of x8 parts */
1790 uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
1793 uint8_t side_x; /* tracks LEFT/RIGHT approach vectors */
1794 uint8_t side_y; /* tracks BOTTOM/TOP approach vectors */
1795 /* X coordinate data (passing RDQS values) for approach vectors */
1796 uint8_t x_coordinate[2][2][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
1797 /* Y coordinate data (passing VREF values) for approach vectors */
1798 uint8_t y_coordinate[2][2][NUM_CHANNELS][NUM_BYTE_LANES];
1799 /* centered X (RDQS) */
1800 uint8_t x_center[NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
1801 /* centered Y (VREF) */
1802 uint8_t y_center[NUM_CHANNELS][NUM_BYTE_LANES];
1803 uint32_t address; /* target address for check_bls_ex() */
1804 uint32_t result; /* result of check_bls_ex() */
1805 uint32_t bl_mask; /* byte lane mask for result checking */
1807 /* used to find placement for rank2rank sharing configs */
1808 uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
1809 /* used to find placement for rank2rank sharing configs */
1810 uint32_t num_ranks_enabled = 0;
1814 /* rd_train starts */
1815 mrc_post_code(0x07, 0x00);
/* seed RDQS with the platform default for every enabled lane */
1820 for (ch = 0; ch < NUM_CHANNELS; ch++) {
1821 if (mrc_params->channel_enables & (1 << ch)) {
1822 for (rk = 0; rk < NUM_RANKS; rk++) {
1823 if (mrc_params->rank_enables & (1 << rk)) {
1825 bl < NUM_BYTE_LANES / bl_divisor;
1827 set_rdqs(ch, rk, bl, ddr_rdqs[PLATFORM_ID]);
1834 /* initialize x/y_coordinate arrays */
1835 for (ch = 0; ch < NUM_CHANNELS; ch++) {
1836 if (mrc_params->channel_enables & (1 << ch)) {
1837 for (rk = 0; rk < NUM_RANKS; rk++) {
1838 if (mrc_params->rank_enables & (1 << rk)) {
1840 bl < NUM_BYTE_LANES / bl_divisor;
/* X search starts at the extremes for each approach vector */
1843 x_coordinate[L][B][ch][rk][bl] = RDQS_MIN;
1844 x_coordinate[R][B][ch][rk][bl] = RDQS_MAX;
1845 x_coordinate[L][T][ch][rk][bl] = RDQS_MIN;
1846 x_coordinate[R][T][ch][rk][bl] = RDQS_MAX;
1848 y_coordinate[L][B][ch][bl] = VREF_MIN;
1849 y_coordinate[R][B][ch][bl] = VREF_MIN;
1850 y_coordinate[L][T][ch][bl] = VREF_MAX;
1851 y_coordinate[R][T][ch][bl] = VREF_MAX;
1858 /* initialize other variables */
1859 bl_mask = byte_lane_mask(mrc_params);
1860 address = get_addr(0, 0);
1863 /* need to set "final_delay[][]" elements to "0" */
1864 memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
1867 /* look for passing coordinates */
1868 for (side_y = B; side_y <= T; side_y++) {
1869 for (side_x = L; side_x <= R; side_x++) {
1870 mrc_post_code(0x07, 0x10 + side_y * 2 + side_x);
1872 /* find passing values */
1873 for (ch = 0; ch < NUM_CHANNELS; ch++) {
1874 if (mrc_params->channel_enables & (0x1 << ch)) {
1875 for (rk = 0; rk < NUM_RANKS; rk++) {
1876 if (mrc_params->rank_enables &
1878 /* set x/y_coordinate search starting settings */
1880 bl < NUM_BYTE_LANES / bl_divisor;
1882 set_rdqs(ch, rk, bl,
1883 x_coordinate[side_x][side_y][ch][rk][bl]);
1885 y_coordinate[side_x][side_y][ch][bl]);
1888 /* get an address in the target channel/rank */
1889 address = get_addr(ch, rk);
1891 /* request HTE reconfiguration */
1892 mrc_params->hte_setup = 1;
1894 /* test the settings */
1896 /* result[07:00] == failing byte lane (MAX 8) */
1897 result = check_bls_ex(mrc_params, address);
1899 /* check for failures */
1900 if (result & 0xff) {
1901 /* at least 1 byte lane failed */
1902 for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
1905 /* adjust the RDQS values accordingly */
1907 x_coordinate[L][side_y][ch][rk][bl] += RDQS_STEP;
1909 x_coordinate[R][side_y][ch][rk][bl] -= RDQS_STEP;
1911 /* check that we haven't closed the RDQS_EYE too much */
1912 if ((x_coordinate[L][side_y][ch][rk][bl] > (RDQS_MAX - MIN_RDQS_EYE)) ||
1913 (x_coordinate[R][side_y][ch][rk][bl] < (RDQS_MIN + MIN_RDQS_EYE)) ||
1914 (x_coordinate[L][side_y][ch][rk][bl] ==
1915 x_coordinate[R][side_y][ch][rk][bl])) {
1917 * not enough RDQS margin available at this VREF
1918 * update VREF values accordingly
1921 y_coordinate[side_x][B][ch][bl] += VREF_STEP;
1923 y_coordinate[side_x][T][ch][bl] -= VREF_STEP;
1925 /* check that we haven't closed the VREF_EYE too much */
1926 if ((y_coordinate[side_x][B][ch][bl] > (VREF_MAX - MIN_VREF_EYE)) ||
1927 (y_coordinate[side_x][T][ch][bl] < (VREF_MIN + MIN_VREF_EYE)) ||
1928 (y_coordinate[side_x][B][ch][bl] == y_coordinate[side_x][T][ch][bl])) {
1929 /* VREF_EYE collapsed below MIN_VREF_EYE */
1930 training_message(ch, rk, bl);
1931 mrc_post_code(0xEE, 0x70 + side_y * 2 + side_x);
1933 /* update the VREF setting */
1934 set_vref(ch, bl, y_coordinate[side_x][side_y][ch][bl]);
1935 /* reset the X coordinate to begin the search at the new VREF */
1936 x_coordinate[side_x][side_y][ch][rk][bl] =
1937 (side_x == L) ? RDQS_MIN : RDQS_MAX;
1941 /* update the RDQS setting */
1942 set_rdqs(ch, rk, bl, x_coordinate[side_x][side_y][ch][rk][bl]);
/* repeat until every byte lane passes at this approach vector */
1946 } while (result & 0xff);
1954 mrc_post_code(0x07, 0x20);
1956 /* find final RDQS (X coordinate) & final VREF (Y coordinate) */
1957 for (ch = 0; ch < NUM_CHANNELS; ch++) {
1958 if (mrc_params->channel_enables & (1 << ch)) {
1959 for (rk = 0; rk < NUM_RANKS; rk++) {
1960 if (mrc_params->rank_enables & (1 << rk)) {
1961 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
1967 "RDQS T/B eye rank%d lane%d : %d-%d %d-%d\n",
1969 x_coordinate[L][T][ch][rk][bl],
1970 x_coordinate[R][T][ch][rk][bl],
1971 x_coordinate[L][B][ch][rk][bl],
1972 x_coordinate[R][B][ch][rk][bl]);
1974 /* average the TOP side LEFT & RIGHT values */
/* NOTE(review): temp1/temp2 are declared on lines elided from this view */
1975 temp1 = (x_coordinate[R][T][ch][rk][bl] + x_coordinate[L][T][ch][rk][bl]) / 2;
1976 /* average the BOTTOM side LEFT & RIGHT values */
1977 temp2 = (x_coordinate[R][B][ch][rk][bl] + x_coordinate[L][B][ch][rk][bl]) / 2;
1978 /* average the above averages */
1979 x_center[ch][rk][bl] = (uint8_t) ((temp1 + temp2) / 2);
1983 "VREF R/L eye lane%d : %d-%d %d-%d\n",
1985 y_coordinate[R][B][ch][bl],
1986 y_coordinate[R][T][ch][bl],
1987 y_coordinate[L][B][ch][bl],
1988 y_coordinate[L][T][ch][bl]);
1990 /* average the RIGHT side TOP & BOTTOM values */
1991 temp1 = (y_coordinate[R][T][ch][bl] + y_coordinate[R][B][ch][bl]) / 2;
1992 /* average the LEFT side TOP & BOTTOM values */
1993 temp2 = (y_coordinate[L][T][ch][bl] + y_coordinate[L][B][ch][bl]) / 2;
1994 /* average the above averages */
1995 y_center[ch][bl] = (uint8_t) ((temp1 + temp2) / 2);
2003 /* perform an eye check */
2004 for (side_y = B; side_y <= T; side_y++) {
2005 for (side_x = L; side_x <= R; side_x++) {
2006 mrc_post_code(0x07, 0x30 + side_y * 2 + side_x);
2008 /* update the settings for the eye check */
2009 for (ch = 0; ch < NUM_CHANNELS; ch++) {
2010 if (mrc_params->channel_enables & (1 << ch)) {
2011 for (rk = 0; rk < NUM_RANKS; rk++) {
2012 if (mrc_params->rank_enables & (1 << rk)) {
2013 for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
2015 set_rdqs(ch, rk, bl, x_center[ch][rk][bl] - (MIN_RDQS_EYE / 2));
2017 set_rdqs(ch, rk, bl, x_center[ch][rk][bl] + (MIN_RDQS_EYE / 2));
2020 set_vref(ch, bl, y_center[ch][bl] - (MIN_VREF_EYE / 2));
2022 set_vref(ch, bl, y_center[ch][bl] + (MIN_VREF_EYE / 2));
2029 /* request HTE reconfiguration */
2030 mrc_params->hte_setup = 1;
2033 if (check_bls_ex(mrc_params, address) & 0xff) {
2034 /* one or more byte lanes failed */
2035 mrc_post_code(0xee, 0x74 + side_x * 2 + side_y);
2041 mrc_post_code(0x07, 0x40);
2043 /* set final placements */
2044 for (ch = 0; ch < NUM_CHANNELS; ch++) {
2045 if (mrc_params->channel_enables & (1 << ch)) {
2046 for (rk = 0; rk < NUM_RANKS; rk++) {
2047 if (mrc_params->rank_enables & (1 << rk)) {
2049 /* increment "num_ranks_enabled" */
2050 num_ranks_enabled++;
2052 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
/* rank2rank sharing: rolling average of x_center across ranks */
2055 final_delay[ch][bl] += x_center[ch][rk][bl];
2056 set_rdqs(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
/* non-sharing: program this rank's centered RDQS directly */
2058 set_rdqs(ch, rk, bl, x_center[ch][rk][bl]);
2061 set_vref(ch, bl, y_center[ch][bl]);
2073 * This function will perform the WRITE TRAINING Algorithm on all
2074 * channels/ranks/byte_lanes simultaneously to minimize execution time.
2076 * The idea here is to train the WDQ timings to achieve maximum WRITE margins.
2077 * The algorithm will start with WDQ at the current WDQ setting (tracks WDQS
2078 * in WR_LVL) +/- 32 PIs (+/- 1/4 CLK) and collapse the eye until all data
2079 * patterns pass. This is because WDQS will be aligned to WCLK by the
2080 * Write Leveling algorithm and WDQ will only ever have a 1/2 CLK window
2083 void wr_train(struct mrc_params *mrc_params)
2085 uint8_t ch; /* channel counter */
2086 uint8_t rk; /* rank counter */
2087 uint8_t bl; /* byte lane counter */
/* x16 parts expose half the byte lanes of x8 parts */
2088 uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
2091 uint8_t side; /* LEFT/RIGHT side indicator (0=L, 1=R) */
2092 uint32_t temp; /* temporary DWORD */
2093 /* 2 arrays, for L & R side passing delays */
2094 uint32_t delay[2][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
2095 uint32_t address; /* target address for check_bls_ex() */
2096 uint32_t result; /* result of check_bls_ex() */
2097 uint32_t bl_mask; /* byte lane mask for result checking */
2099 /* used to find placement for rank2rank sharing configs */
2100 uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
2101 /* used to find placement for rank2rank sharing configs */
2102 uint32_t num_ranks_enabled = 0;
2106 /* wr_train starts */
2107 mrc_post_code(0x08, 0x00);
/* seed WDQ with the platform default for every enabled lane */
2112 for (ch = 0; ch < NUM_CHANNELS; ch++) {
2113 if (mrc_params->channel_enables & (1 << ch)) {
2114 for (rk = 0; rk < NUM_RANKS; rk++) {
2115 if (mrc_params->rank_enables & (1 << rk)) {
2117 bl < NUM_BYTE_LANES / bl_divisor;
2119 set_wdq(ch, rk, bl, ddr_wdq[PLATFORM_ID]);
2126 /* initialize "delay" */
2127 for (ch = 0; ch < NUM_CHANNELS; ch++) {
2128 if (mrc_params->channel_enables & (1 << ch)) {
2129 for (rk = 0; rk < NUM_RANKS; rk++) {
2130 if (mrc_params->rank_enables & (1 << rk)) {
2132 bl < NUM_BYTE_LANES / bl_divisor;
2135 * want to start with
2136 * WDQ = (WDQS - QRTR_CLK)
/* L/R start +/- 1/4 CLK around the trained WDQS - QRTR_CLK point */
2139 temp = get_wdqs(ch, rk, bl) - QRTR_CLK;
2140 delay[L][ch][rk][bl] = temp - QRTR_CLK;
2141 delay[R][ch][rk][bl] = temp + QRTR_CLK;
2148 /* initialize other variables */
2149 bl_mask = byte_lane_mask(mrc_params);
2150 address = get_addr(0, 0);
2153 /* need to set "final_delay[][]" elements to "0" */
2154 memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
2158 * start algorithm on the LEFT side and train each channel/bl
2159 * until no failures are observed, then repeat for the RIGHT side.
2161 for (side = L; side <= R; side++) {
2162 mrc_post_code(0x08, 0x10 + side);
2164 /* set starting values */
2165 for (ch = 0; ch < NUM_CHANNELS; ch++) {
2166 if (mrc_params->channel_enables & (1 << ch)) {
2167 for (rk = 0; rk < NUM_RANKS; rk++) {
2168 if (mrc_params->rank_enables &
2171 bl < NUM_BYTE_LANES / bl_divisor;
2173 set_wdq(ch, rk, bl, delay[side][ch][rk][bl]);
2180 /* find passing values */
2181 for (ch = 0; ch < NUM_CHANNELS; ch++) {
2182 if (mrc_params->channel_enables & (1 << ch)) {
2183 for (rk = 0; rk < NUM_RANKS; rk++) {
2184 if (mrc_params->rank_enables &
2186 /* get an address in the target channel/rank */
2187 address = get_addr(ch, rk);
2189 /* request HTE reconfiguration */
2190 mrc_params->hte_setup = 1;
2192 /* check the settings */
2194 /* result[07:00] == failing byte lane (MAX 8) */
2195 result = check_bls_ex(mrc_params, address);
2196 /* check for failures */
2197 if (result & 0xff) {
2198 /* at least 1 byte lane failed */
2199 for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
/* step the failing side's edge inward by one WDQ_STEP */
2203 delay[L][ch][rk][bl] += WDQ_STEP;
2205 delay[R][ch][rk][bl] -= WDQ_STEP;
2207 /* check for algorithm failure */
2208 if (delay[L][ch][rk][bl] != delay[R][ch][rk][bl]) {
2211 * update delay setting
2214 delay[side][ch][rk][bl]);
2217 * no margin available
2218 * notify the user and halt
2220 training_message(ch, rk, bl);
2221 mrc_post_code(0xee, 0x80 + side);
2226 /* stop when all byte lanes pass */
2227 } while (result & 0xff);
2234 /* program WDQ to the middle of passing window */
2235 for (ch = 0; ch < NUM_CHANNELS; ch++) {
2236 if (mrc_params->channel_enables & (1 << ch)) {
2237 for (rk = 0; rk < NUM_RANKS; rk++) {
2238 if (mrc_params->rank_enables & (1 << rk)) {
2240 /* increment "num_ranks_enabled" */
2241 num_ranks_enabled++;
2243 for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
2245 "WDQ eye rank%d lane%d : %d-%d\n",
2247 delay[L][ch][rk][bl],
2248 delay[R][ch][rk][bl]);
/* midpoint of the passing L/R window */
2250 temp = (delay[R][ch][rk][bl] + delay[L][ch][rk][bl]) / 2;
/* rank2rank sharing: rolling average across ranks */
2253 final_delay[ch][bl] += temp;
2255 final_delay[ch][bl] / num_ranks_enabled);
/* non-sharing: program this rank's midpoint directly */
2257 set_wdq(ch, rk, bl, temp);
2270 * This function will store relevant timing data
2272 * This data will be used on subsequent boots to speed up boot times
2273 * and is required for Suspend To RAM capabilities.
2275 void store_timings(struct mrc_params *mrc_params)
/* destination: the timings structure saved to NVM for fast/S3 boots */
2278 struct mrc_timings *mt = &mrc_params->timings;
/* capture every trained per-channel/rank/byte-lane delay */
2280 for (ch = 0; ch < NUM_CHANNELS; ch++) {
2281 for (rk = 0; rk < NUM_RANKS; rk++) {
2282 for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
2283 mt->rcvn[ch][rk][bl] = get_rcvn(ch, rk, bl);
2284 mt->rdqs[ch][rk][bl] = get_rdqs(ch, rk, bl);
2285 mt->wdqs[ch][rk][bl] = get_wdqs(ch, rk, bl);
2286 mt->wdq[ch][rk][bl] = get_wdq(ch, rk, bl);
2289 mt->vref[ch][bl] = get_vref(ch, bl);
2292 mt->wctl[ch][rk] = get_wctl(ch, rk);
2295 mt->wcmd[ch] = get_wcmd(ch);
2298 /* need to save for a case of changing frequency after warm reset */
2299 mt->ddr_speed = mrc_params->ddr_speed;
2303 * The purpose of this function is to ensure the SEC comes out of reset
2304 * and IA initiates the SEC enabling Memory Scrambling.
2306 void enable_scrambling(struct mrc_params *mrc_params)
/* nothing to do when scrambling is not requested */
2311 if (mrc_params->scrambling_enables == 0)
2316 /* 32 bit seed is always stored in BIOS NVM */
2317 lfsr = mrc_params->timings.scrambler_seed;
2319 if (mrc_params->boot_mode == BM_COLD) {
2321 * factory value is 0 and in first boot,
2322 * a clock based seed is loaded.
2326 * get seed from system clock
2327 * and make sure it is not all 1's
2329 lfsr = rdtsc() & 0x0fffffff;
2332 * Need to replace scrambler
2334 * get next 32bit LFSR 16 times which is the last
2335 * part of the previous scrambler vector
2337 for (i = 0; i < 16; i++)
/* persist the (possibly new) seed for subsequent boots */
2342 mrc_params->timings.scrambler_seed = lfsr;
2346 * In warm boot or S3 exit, we have the previous seed.
2347 * In cold boot, we have the last 32bit LFSR which is the new seed.
2349 lfsr32(&lfsr); /* shift to next value */
/* program low 18 bits of the seed into the MCU scrambler */
2350 msg_port_write(MEM_CTLR, SCRMSEED, (lfsr & 0x0003ffff));
2352 for (i = 0; i < 2; i++)
2353 msg_port_write(MEM_CTLR, SCRMLO + i, (lfsr & 0xaaaaaaaa));
2359 * Configure MCU Power Management Control Register
2360 * and Scheduler Control Register
2362 void prog_ddr_control(struct mrc_params *mrc_params)
/* scheduler: allow out-of-order and new-request bypass */
2369 dsch = msg_port_read(MEM_CTLR, DSCH);
2370 dsch &= ~(DSCH_OOODIS | DSCH_OOOST3DIS | DSCH_NEWBYPDIS);
2371 msg_port_write(MEM_CTLR, DSCH, dsch);
/* power management: power-down per caller, clock gating, page close */
2373 dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
2374 dpmc0 &= ~DPMC0_DISPWRDN;
/* honour the caller's power-down-disable request (bit 25) */
2375 dpmc0 |= (mrc_params->power_down_disable << 25);
2376 dpmc0 &= ~DPMC0_CLKGTDIS;
2377 dpmc0 &= ~DPMC0_PCLSTO_MASK;
2379 dpmc0 |= DPMC0_PREAPWDEN;
2380 msg_port_write(MEM_CTLR, DPMC0, dpmc0);
2382 /* CMDTRIST = 2h - CMD/ADDR are tristated when no valid command */
2383 mrc_write_mask(MEM_CTLR, DPMC1, 0x20, 0x30);
2389 * After training complete configure MCU Rank Population Register
2390 * specifying: ranks enabled, device width, density, address mode
2392 void prog_dra_drb(struct mrc_params *mrc_params)
2396 u8 density = mrc_params->params.density;
/* NOTE(review): DCO is modified on lines elided from this view between
 * this read and the write-back (presumably unlocking/relocking DRP) -
 * confirm.
 */
2400 dco = msg_port_read(MEM_CTLR, DCO);
2402 msg_port_write(MEM_CTLR, DCO, dco);
/* build DRP: rank enables, device width, density, address mode */
2405 if (mrc_params->rank_enables & 1)
2407 if (mrc_params->rank_enables & 2)
2409 if (mrc_params->dram_width == X16) {
2415 * Density encoding in struct dram_params: 0=512Mb, 1=Gb, 2=2Gb, 3=4Gb
2416 * has to be mapped RANKDENSx encoding (0=1Gb)
2421 drp |= ((density - 1) << 6);
2422 drp |= ((density - 1) << 11);
2424 /* Address mode can be overwritten if ECC enabled */
2425 drp |= (mrc_params->address_mode << 14);
2427 msg_port_write(MEM_CTLR, DRP, drp);
2431 msg_port_write(MEM_CTLR, DCO, dco);
2436 /* Send DRAM wake command */
/* Issue the DRAM wake command via the message port helper */
2437 void perform_wake(struct mrc_params *mrc_params)
2441 dram_wake_command();
2447 * Configure refresh rate and short ZQ calibration interval
2448 * Activate dynamic self refresh
2450 void change_refresh_period(struct mrc_params *mrc_params)
/* program tREFI field (bits 14:12) and clear the refresh debt counter */
2458 drfc = msg_port_read(MEM_CTLR, DRFC);
2459 drfc &= ~DRFC_TREFI_MASK;
2460 drfc |= (mrc_params->refresh_rate << 12);
2461 drfc |= DRFC_REFDBTCLR;
2462 msg_port_write(MEM_CTLR, DRFC, drfc);
/* shorten the periodic ZQ calibration interval */
2464 dcal = msg_port_read(MEM_CTLR, DCAL);
2465 dcal &= ~DCAL_ZQCINT_MASK;
2466 dcal |= (3 << 8); /* 63ms */
2467 msg_port_write(MEM_CTLR, DCAL, dcal);
/* enable dynamic self refresh and PHY clock gating */
2469 dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
2470 dpmc0 |= (DPMC0_DYNSREN | DPMC0_ENPHYCLKGATE);
2471 msg_port_write(MEM_CTLR, DPMC0, dpmc0);
2477 * Configure DDRPHY for Auto-Refresh, Periodic Compensations,
2478 * Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down
2480 void set_auto_refresh(struct mrc_params *mrc_params)
2485 uint32_t bl_divisor = 1;
2491 * Enable Auto-Refresh, Periodic Compensations, Dynamic Diff-Amp,
2492 * ZQSPERIOD, Auto-Precharge, CKE Power-Down
2494 for (channel = 0; channel < NUM_CHANNELS; channel++) {
2495 if (mrc_params->channel_enables & (1 << channel)) {
2496 /* Enable Periodic RCOMPS */
2497 mrc_alt_write_mask(DDRPHY, CMPCTRL, 2, 2);
2499 /* Enable Dynamic DiffAmp & Set Read ODT Value */
/* NOTE(review): switch cases/default for other ODT values are on
 * lines elided from this view - confirm full mapping.
 */
2500 switch (mrc_params->rd_odt_value) {
2502 temp = 0x3f; /* OFF */
2505 temp = 0x00; /* Auto */
/* apply the override to both byte lanes of each DQ module */
2509 for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
2510 /* Override: DIFFAMP, ODT */
2511 mrc_alt_write_mask(DDRPHY,
2512 B0OVRCTL + bl * DDRIODQ_BL_OFFSET +
2513 channel * DDRIODQ_CH_OFFSET,
2517 /* Override: DIFFAMP, ODT */
2518 mrc_alt_write_mask(DDRPHY,
2519 B1OVRCTL + bl * DDRIODQ_BL_OFFSET +
2520 channel * DDRIODQ_CH_OFFSET,
2525 /* Issue ZQCS command */
2526 for (rank = 0; rank < NUM_RANKS; rank++) {
2527 if (mrc_params->rank_enables & (1 << rank))
2528 dram_init_command(DCMD_ZQCS(rank));
2539 * Depending on configuration enables ECC support
2541 * Available memory size is decreased, and updated with 0s
2542 * in order to clear error status. Address mode 2 forced.
2544 void ecc_enable(struct mrc_params *mrc_params)
/* nothing to do when ECC is not requested */
2550 if (mrc_params->ecc_enables == 0)
2555 /* Configuration required in ECC mode */
2556 drp = msg_port_read(MEM_CTLR, DRP);
/* force address map 1 and 64B split priority for ECC operation */
2557 drp &= ~DRP_ADDRMAP_MASK;
2558 drp |= DRP_ADDRMAP_MAP1;
2559 drp |= DRP_PRI64BSPLITEN;
2560 msg_port_write(MEM_CTLR, DRP, drp);
2562 /* Disable new request bypass */
2563 dsch = msg_port_read(MEM_CTLR, DSCH);
2564 dsch |= DSCH_NEWBYPDIS;
2565 msg_port_write(MEM_CTLR, DSCH, dsch);
/* enable single/double-bit error detection and check-bit generation */
2568 ecc_ctrl = (DECCCTRL_SBEEN | DECCCTRL_DBEEN | DECCCTRL_ENCBGEN);
2569 msg_port_write(MEM_CTLR, DECCCTRL, ecc_ctrl);
2571 /* Assume 8 bank memory, one bank is gone for ECC */
2572 mrc_params->mem_size -= mrc_params->mem_size / 8;
2574 /* For S3 resume memory content has to be preserved */
2575 if (mrc_params->boot_mode != BM_S3) {
/* zero-fill memory so ECC check bits start in a consistent state */
2577 hte_mem_init(mrc_params, MRC_MEM_INIT);
2585 * Execute memory test
2586 * if error detected it is indicated in mrc_params->status
2588 void memory_test(struct mrc_params *mrc_params)
2590 uint32_t result = 0;
/* run the HTE memory test; non-zero result indicates failure */
2595 result = hte_mem_init(mrc_params, MRC_MEM_TEST);
2598 DPF(D_INFO, "Memory test result %x\n", result);
/* report pass/fail through mrc_params->status */
2599 mrc_params->status = ((result == 0) ? MRC_SUCCESS : MRC_E_MEMTEST);
2603 /* Lock MCU registers at the end of initialization sequence */
2604 void lock_registers(struct mrc_params *mrc_params)
2610 dco = msg_port_read(MEM_CTLR, DCO);
2611 dco &= ~(DCO_PMICTL | DCO_PMIDIS);
2612 dco |= (DCO_DRPLOCK | DCO_CPGCLOCK);
2613 msg_port_write(MEM_CTLR, DCO, dco);