/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"
static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);

static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);

static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;

static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);

static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;

static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);

static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;

static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;
/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)
/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;
/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values.
 *
 * The mask is set to include all bits when not-skipping, but is
 * all-zero when skipping.
 */

uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */
#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)
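/*
 * For example (illustrative only): with skip_delay_mask = 0xffff,
 * SKIP_DELAY_LOOP_VALUE_OR_ZERO(250) evaluates to 250 (delays honoured);
 * with skip_delay_mask = 0 it evaluates to 0 and the delay loops collapse
 * for fast simulation.
 */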
struct param_type *param;
uint32_t curr_shadow_reg;

static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm,
	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);
static void set_failing_group_stage(uint32_t group, uint32_t stage,
	uint32_t substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group.
	 */
	if (gbl->error_stage == CAL_STAGE_NIL) {
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}
static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}
static void initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);
	/*
	 * Calibration has control over the path to memory.
	 *
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* The memory clock is not stable yet as we begin initialization. */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* Calibration status: all set to zero. */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	if ((dyn_calib_steps & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) {
		param->read_correct_mask_vg = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->write_correct_mask_vg = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->read_correct_mask = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
		param->write_correct_mask = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
		param->dm_correct_mask = ((uint32_t)1 <<
			(RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH))
			- 1;
	}
}
static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
{
	uint32_t odt_mask_0 = 0;
	uint32_t odt_mask_1 = 0;
	uint32_t cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) {
		if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) {
			/*
			 * 1 Rank
			 * Read: ODT = 0
			 * Write: ODT = 1
			 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
		} else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) {
			/* 2 Ranks */
			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
				/*
				 * - Dual-Slot, Single-Rank
				 *   (1 chip-select per DIMM)
				 * OR
				 * - RDIMM, 4 total CS (2 CS per DIMM)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they are
				 * both single rank with 2 CS each
				 * (special for RDIMM).
				 *
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * - Single-Slot, Dual-Rank
				 *   (2 chip-selects per DIMM)
				 *
				 * Read: Turn off ODT on all ranks
				 * Write: Turn on ODT on the active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
		} else {
			/*
			 * 4 Ranks
			 * Read:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
		}
	} else {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	}

	cs_and_odt_mask =
		(0xFF & ~(1 << rank)) |
		((0xFF & odt_mask_0) << 8) |
		((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}
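/*
 * For example (illustrative only): a read/write to rank 1 of a single-slot,
 * dual-rank DIMM gives odt_mask_0 = 0x0 and odt_mask_1 = 0x2, so
 * cs_and_odt_mask = 0xFD | (0x0 << 8) | (0x2 << 16) = 0x200FD.
 */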
/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}
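/*
 * For example (illustrative only): scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET,
 * 2, phase) writes "phase" to SDR_PHYGRP_SCCGRP_ADDRESS |
 * SCC_MGR_DQS_EN_PHASE_OFFSET | (2 << 2); each offset block holds one
 * 32-bit word per group.
 */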
/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i);
	}
}
static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t write_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(uint32_t write_group,
				       uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
		    delay);
}
/* load up dqs config settings */
static void scc_mgr_load_dqs(uint32_t dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(uint32_t dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}
/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 * @update:	If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}
static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
{
	/*
	 * Although the h/w doesn't support different phases per
	 * shadow register, for simplicity our SCC manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
						     uint32_t phase)
{
	/*
	 * Although the h/w doesn't support different phases per
	 * shadow register, for simplicity our SCC manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}
static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
					       uint32_t delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
	writel(0, &sdr_scc_mgr->update);
}
/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * This function sets the OCT output delay in the SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;

	/*
	 * Load the setting in the SCC manager.
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}
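/*
 * For example (illustrative only): with RW_MGR_MEM_IF_READ_DQS_WIDTH = 8 and
 * RW_MGR_MEM_IF_WRITE_DQS_WIDTH = 4, ratio = 2, so write group 1 covers read
 * groups 2 and 3 and the delay is written once for each.
 */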
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager:
	 * bits: 0:0 = 1'b1	- dqs bypass
	 * bits: 1:1 = 1'b1	- dq bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	uint32_t value = (0 << 8) | (0 << 7) | (1 << 5) | (1 << 2) |
			 (1 << 1) | (1 << 0);
	uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_HHP_GLOBALS_OFFSET;
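	/*
	 * Sanity check (derived from the bit map above): the value expression
	 * packs to (1 << 5) | (1 << 2) | (1 << 1) | (1 << 0) = 0x27.
	 */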
	writel(value, addr + SCC_MGR_HHP_EXTRAS_OFFSET);
}
/*
 * Zero all DQS config.
 * TODO: maybe rename to scc_mgr_zero_dqs_config (or something)
 */
static void scc_mgr_zero_all(void)
{
	uint32_t i, r;

	/*
	 * Zero all DQS config settings, across all groups and all
	 * shadow registers.
	 */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r +=
	     NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* av/cv don't have out2 */
			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
		}
	}

	/* multicast to all DQS group enables */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}
/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:	Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Only needed once to set all groups, pins, DQ, DQS, DM. */
	if (write_group == 0) {
		debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n", __func__,
			   __LINE__);
		scc_mgr_set_hhp_extras();
		debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
			   __func__, __LINE__);
	}

	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}
/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group:	Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;

	/*
	 * Load the setting in the SCC manager.
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}
static void scc_mgr_zero_group(uint32_t write_group, uint32_t test_begin,
			       int32_t out_only)
{
	uint32_t i, r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r +=
	     NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings */
		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* multicast to all DQ enables */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
			scc_mgr_set_dm_out1_delay(i, 0);
		}

		/* multicast to all DM enables */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* zero all DQS io settings */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(write_group, 0);
		/* av/cv don't have out2 */
		scc_mgr_set_dqs_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_load_dqs_for_write_group(write_group);

		/* multicast to all DQS IO enables (only 1) */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* hit update to zero everything */
		writel(0, &sdr_scc_mgr->update);
	}
}
/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(uint32_t write_group,
	uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}
/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
 * @delay:		Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
{
	int i;

	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}
/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t write_group,
					      uint32_t delay1)
{
	uint32_t i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
						    uint32_t delay)
{
	scc_mgr_set_dqs_out1_delay(write_group, delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(write_group, delay);
	scc_mgr_load_dqs_for_write_group(write_group);
}
/* apply a delay to the entire output side: DQ, DM, DQS, OCT */
static void scc_mgr_apply_group_all_out_delay_add(uint32_t write_group,
						  uint32_t group_bgn,
						  uint32_t delay)
{
	uint32_t i, p, new_delay;

	/* dq shift */
	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
		new_delay = READ_SCC_DQ_OUT2_DELAY;
		new_delay += delay;

		if (new_delay > IO_IO_OUT2_DELAY_MAX) {
			debug_cond(DLEVEL == 1,
				   "%s:%d (%u, %u, %u) DQ[%u,%u]: %u > %lu => %lu",
				   __func__, __LINE__,
				   write_group, group_bgn, delay, i, p,
				   new_delay,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX);
			new_delay = IO_IO_OUT2_DELAY_MAX;
		}

		scc_mgr_load_dq(i);
	}

	/* dm shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		new_delay = READ_SCC_DM_IO_OUT2_DELAY;
		new_delay += delay;

		if (new_delay > IO_IO_OUT2_DELAY_MAX) {
			debug_cond(DLEVEL == 1,
				   "%s:%d (%u, %u, %u) DM[%u]: %u > %lu => %lu\n",
				   __func__, __LINE__,
				   write_group, group_bgn, delay, i, new_delay,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX);
			new_delay = IO_IO_OUT2_DELAY_MAX;
		}

		scc_mgr_load_dm(i);
	}

	/* dqs shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY;
	new_delay += delay;

	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u, %u) DQS: %u > %d => %d; adding %u to OUT1\n",
			   __func__, __LINE__,
			   write_group, group_bgn, delay, new_delay,
			   IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		scc_mgr_set_dqs_out1_delay(write_group, new_delay -
					   IO_IO_OUT2_DELAY_MAX);
		new_delay = IO_IO_OUT2_DELAY_MAX;
	}

	scc_mgr_load_dqs_io();

	/* oct shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY;
	new_delay += delay;

	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u, %u) DQS: %u > %d => %d; adding %u to OUT1\n",
			   __func__, __LINE__,
			   write_group, group_bgn, delay, new_delay,
			   IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		scc_mgr_set_oct_out1_delay(write_group, new_delay -
					   IO_IO_OUT2_DELAY_MAX);
		new_delay = IO_IO_OUT2_DELAY_MAX;
	}

	scc_mgr_load_dqs_for_write_group(write_group);
}
/*
 * apply a delay to the entire output side (DQ, DM, DQS, OCT)
 * and to all ranks
 */
static void scc_mgr_apply_group_all_out_delay_add_all_ranks(
	uint32_t write_group, uint32_t group_bgn, uint32_t delay)
{
	uint32_t r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(write_group,
						      group_bgn, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}
/*
 * optimization used to recover some slots in ddr3 inst_rom;
 * could be applied to other protocols if we wanted to
 */
static void set_jump_as_return(void)
{
	/*
	 * To save space, we replace return with a jump to a special shared
	 * RETURN instruction, and set the counter to a large value so that
	 * we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}
/*
 * should always use constants as argument to ensure all computations are
 * performed at compile time
 */
static void delay_for_n_mem_clocks(const uint32_t clocks)
{
	uint32_t afi_clocks;
	uint8_t inner = 0;
	uint8_t outer = 0;
	uint16_t c_loop = 0;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* scale (rounding up) to get afi clocks */
	afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO;

	/*
	 * Note, we don't bother accounting for being off a little bit
	 * because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test before
	 * the decrement, and so always perform the loop
	 * 1 time more than the counter value.
	 */
	if (afi_clocks == 0) {
		/* nothing to do */
	} else if (afi_clocks <= 0x100) {
		inner = afi_clocks - 1;
		outer = 0;
		c_loop = 0;
	} else if (afi_clocks <= 0x10000) {
		inner = 0xff;
		outer = (afi_clocks - 1) >> 8;
		c_loop = 0;
	} else {
		inner = 0xff;
		outer = 0xff;
		c_loop = (afi_clocks - 1) >> 16;
	}
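	/*
	 * For example (illustrative only): afi_clocks = 0x250 falls in the
	 * 0x100 < afi_clocks <= 0x10000 branch above, giving inner = 0xff,
	 * outer = (0x250 - 1) >> 8 = 2 and c_loop = 0, i.e. nested loops of
	 * up to 256 inner iterations per outer pass.
	 */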
	/*
	 * rom instructions are structured as follows:
	 *
	 * IDLE_LOOP2: jnz cntr0, TARGET_A
	 * IDLE_LOOP1: jnz cntr1, TARGET_B
	 *             return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well.
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely.
	 *
	 * a little confusing, but it helps save precious space in the inst_rom
	 * and sequencer rom and keeps the delays more accurate and reduces
	 * overhead.
	 */
	if (afi_clocks <= 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* hack to get around compiler not being smart enough */
		if (afi_clocks <= 0x10000) {
			/* only need to run once */
			writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} else {
			do {
				writel(RW_MGR_IDLE_LOOP2,
				       SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
			} while (c_loop-- != 0);
		}
	}
	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}
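/*
 * Typical use (illustrative only): assuming AFI_RATE_RATIO = 2 (half-rate
 * AFI interface), delay_for_n_mem_clocks(250) rounds up to 125 AFI clocks
 * and programs a single inner loop with counter value 124.
 */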
static void rw_mgr_mem_initialize(void)
{
	uint32_t r;
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcast to all ranks */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how to load a register for a loop:
	 * counters are located @ 0x800,
	 * jump addresses are located @ 0xC00.
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C.
	 * I know this ain't pretty, but the Avalon bus throws away the 2
	 * least significant bits.
	 */

	/* start with memory RESET activated */
	/*
	 * tINIT = 200us
	 *
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles.
	 * If a and b are the numbers of iterations in the 2 nested loops,
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b,
	 * where n is the number of instructions in the inner loop.
	 * One possible solution is n = 0, a = 256, b = 106 => a = FF,
	 * b = 6A.
	 */
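	/*
	 * Check (derived from the formula above):
	 * ((2 + 0) * 256 + 2) * 106 = 54484 cycles >= 54000.
	 */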
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(RW_MGR_INIT_RESET_0_CKE_0, grpaddr);

	/* indicate that memory is stable */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);
	/*
	 * transition the RESET to high;
	 * wait for 500us
	 */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles.
	 * If a and b are the numbers of iterations in the 2 nested loops,
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b,
	 * where n is the number of instructions in the inner loop.
	 * One possible solution is n = 2, a = 131, b = 256 => a = 83,
	 * b = FF.
	 */
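	/*
	 * Check (derived from the formula above):
	 * ((2 + 2) * 131 + 2) * 256 = 134656 cycles >= 134000.
	 */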
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(RW_MGR_INIT_RESET_1_CKE_0, grpaddr);

	/* bring up clock enable */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/*
		 * Use mirrored commands for odd ranks if address
		 * mirroring is on.
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_DLL_RESET_MIRR, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			set_jump_as_return();
			writel(RW_MGR_MRS0_DLL_RESET, grpaddr);
		}

		set_jump_as_return();
		writel(RW_MGR_ZQCL, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}
/*
 * At the end of calibration we have to program the user settings in and
 * hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	uint32_t r;
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	debug("%s:%d\n", __func__, __LINE__);
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		writel(RW_MGR_PRECHARGE_ALL, grpaddr);
		/* load up MR settings specified by user */

		/*
		 * Use mirrored commands for odd ranks if address
		 * mirroring is on.
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_USER_MIRR, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_USER, grpaddr);
		}
		/*
		 * Need to wait tMOD (12CK or 15ns) time before issuing
		 * other commands, but we will have plenty of NIOS cycles
		 * before actual handoff, so it's okay.
		 */
	}
}
/*
 * performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works
 */
static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
	uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
	uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst of read commands */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_GUARANTEED_READ,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_GUARANTEED_READ_CONT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk <<
				(RW_MGR_MEM_DQ_PER_READ_DQS /
				 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
			writel(RW_MGR_GUARANTEED_READ, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}
	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
	debug_cond(DLEVEL == 1,
		   "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %lu\n",
		   __func__, __LINE__, group, *bit_chk,
		   param->read_correct_mask,
		   (long unsigned int)(*bit_chk == param->read_correct_mask));
	return *bit_chk == param->read_correct_mask;
}
static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks
	(uint32_t group, uint32_t num_tries, uint32_t *bit_chk)
{
	return rw_mgr_mem_calibrate_read_test_patterns(0, group,
		num_tries, bit_chk, 1);
}
/* load up the patterns we are going to use during a read test */
static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn,
	uint32_t all_ranks)
{
	uint32_t r;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);

	debug("%s:%d\n", __func__, __LINE__);
	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}
/*
 * try a read and see if it returns correct data back. has dummy reads
 * inserted into the mix used to align dqs enable. has more thorough checks
 * than the regular read test.
 */
static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups, uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) &
		CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);
	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_READ_B2B_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(RW_MGR_READ_B2B_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		if (quick_read_mode)
			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
			/* need at least two (1+1) reads to capture failures */
		else if (all_groups)
			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
		else
			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		if (all_groups)
			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk <<
				(RW_MGR_MEM_DQ_PER_READ_DQS /
				 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			if (all_groups)
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
			else
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;

			writel(RW_MGR_READ_B2B, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}
	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %lu",
			   __func__, __LINE__, group,
			   all_groups, *bit_chk, param->read_correct_mask,
			   (long unsigned int)(*bit_chk ==
					       param->read_correct_mask));
		return *bit_chk == param->read_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ONE,%u) => (%u != %lu) => %lu\n",
			   __func__, __LINE__,
			   group, all_groups, *bit_chk,
			   (long unsigned int)0,
			   (long unsigned int)(*bit_chk != 0x00));
		return *bit_chk != 0x00;
	}
}
static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups)
{
	return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
					      bit_chk, all_groups, 1);
}
static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
	(*v)++;
}

static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v)
{
	uint32_t i;

	for (i = 0; i < VFIFO_SIZE - 1; i++)
		rw_mgr_incr_vfifo(grp, v);
}
static int find_vfifo_read(uint32_t grp, uint32_t *bit_chk)
{
	uint32_t v;
	uint32_t fail_cnt = 0;
	uint32_t test_status;

	for (v = 0; v < VFIFO_SIZE; ) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %u\n",
			   __func__, __LINE__, v);
		test_status = rw_mgr_mem_calibrate_read_test_all_ranks
			(grp, 1, PASS_ONE_BIT, bit_chk, 0);
		if (!test_status) {
			fail_cnt++;

			if (fail_cnt == 2)
				break;
		}

		/* fiddle with FIFO */
		rw_mgr_incr_vfifo(grp, &v);
	}

	if (v >= VFIFO_SIZE) {
		/* no failing read found!! Something must have gone wrong */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n",
			   __func__, __LINE__);
		return 0;
	}

	return v;
}
static int find_working_phase(uint32_t *grp, uint32_t *bit_chk,
			      uint32_t dtaps_per_ptap, uint32_t *work_bgn,
			      uint32_t *v, uint32_t *d, uint32_t *p,
			      uint32_t *i, uint32_t *max_working_cnt)
{
	uint32_t found_begin = 0;
	uint32_t tmp_delay = 0;
	uint32_t test_status;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++, tmp_delay +=
	     IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		*work_bgn = tmp_delay;
		scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

		for (*i = 0; *i < VFIFO_SIZE; (*i)++) {
			for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; (*p)++,
			     *work_bgn += IO_DELAY_PER_OPA_TAP) {
				scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

				test_status =
				rw_mgr_mem_calibrate_read_test_all_ranks
				(*grp, 1, PASS_ONE_BIT, bit_chk, 0);

				if (test_status) {
					*max_working_cnt = 1;
					found_begin = 1;
					break;
				}
			}

			if (found_begin)
				break;

			if (*p > IO_DQS_EN_PHASE_MAX)
				/* fiddle with FIFO */
				rw_mgr_incr_vfifo(*grp, v);
		}

		if (found_begin)
			break;
	}

	if (*i >= VFIFO_SIZE) {
		/* cannot find working solution */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
			   __func__, __LINE__);
		return 0;
	} else {
		/* found a working solution */
		return 1;
	}
}
static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk,
			     uint32_t *work_bgn, uint32_t *v, uint32_t *d,
			     uint32_t *p, uint32_t *max_working_cnt)
{
	uint32_t found_begin = 0;
	uint32_t tmp_delay;

	/* Special case code for backing up a phase */
	if (*p == 0) {
		*p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(*grp, v);
	} else {
		(*p)--;
	}
	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
	scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

	for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn;
	     (*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

		if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
							     PASS_ONE_BIT,
							     bit_chk, 0)) {
			found_begin = 1;
			*work_bgn = tmp_delay;
			break;
		}
	}

	/* We have found a working dtap before the ptap found above */
	if (found_begin == 1)
		(*max_working_cnt)++;

	/*
	 * Restore VFIFO to old state before we decremented it
	 * (if needed).
	 */
	(*p)++;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		*p = 0;
		rw_mgr_incr_vfifo(*grp, v);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(*grp, 0);
}
static int sdr_nonworking_phase(uint32_t *grp, uint32_t *bit_chk,
				uint32_t *work_bgn, uint32_t *v, uint32_t *d,
				uint32_t *p, uint32_t *i,
				uint32_t *max_working_cnt,
				uint32_t *work_end)
{
	uint32_t found_end = 0;

	(*p)++;
	*work_end += IO_DELAY_PER_OPA_TAP;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		/* fiddle with FIFO */
		*p = 0;
		rw_mgr_incr_vfifo(*grp, v);
	}

	for (; *i < VFIFO_SIZE + 1; (*i)++) {
		for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_end
			+= IO_DELAY_PER_OPA_TAP) {
			scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

			if (!rw_mgr_mem_calibrate_read_test_all_ranks
				(*grp, 1, PASS_ONE_BIT, bit_chk, 0)) {
				found_end = 1;
				break;
			} else {
				(*max_working_cnt)++;
			}
		}

		if (found_end)
			break;

		if (*p > IO_DQS_EN_PHASE_MAX) {
			/* fiddle with FIFO */
			rw_mgr_incr_vfifo(*grp, v);
			*p = 0;
		}
	}

	if (*i >= VFIFO_SIZE + 1) {
		/* cannot see edge of failing read */
		debug_cond(DLEVEL == 2, "%s:%d sdr_nonworking_phase: end: failed\n",
			   __func__, __LINE__);
		return 0;
	} else {
		/* found the edge of the failing read */
		return 1;
	}
}
static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk,
				  uint32_t *work_bgn, uint32_t *v, uint32_t *d,
				  uint32_t *p, uint32_t *work_mid,
				  uint32_t *work_end)
{
	int i;
	int tmp_delay = 0;

	*work_mid = (*work_bgn + *work_end) / 2;

	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
		   *work_bgn, *work_end, *work_mid);
	/* Get the middle delay to be less than a VFIFO delay */
	for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX;
	     (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
		;
	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
	while (*work_mid > tmp_delay)
		*work_mid -= tmp_delay;
	debug_cond(DLEVEL == 2, "new work_mid %d\n", *work_mid);
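	/*
	 * For example (illustrative only): if the full ptap sweep above
	 * totals tmp_delay = 1000 and work_mid = 2500, the while loop
	 * reduces work_mid to 500, i.e. work_mid modulo one VFIFO cycle.
	 */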
	tmp_delay = 0;
	for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX && tmp_delay < *work_mid;
	     (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
		;
	tmp_delay -= IO_DELAY_PER_OPA_TAP;
	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", (*p) - 1, tmp_delay);
	for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_mid; (*d)++,
	     tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP)
		;
	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", *d, tmp_delay);

	scc_mgr_set_dqs_en_phase_all_ranks(*grp, (*p) - 1);
	scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

	/*
	 * push vfifo until we can successfully calibrate. We can do this
	 * because the largest possible margin is 1 VFIFO cycle.
	 */
	for (i = 0; i < VFIFO_SIZE; i++) {
		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%u\n",
			   *v);
		if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
							     PASS_ONE_BIT,
							     bit_chk, 0)) {
			break;
		}

		/* fiddle with FIFO */
		rw_mgr_incr_vfifo(*grp, v);
	}

	if (i >= VFIFO_SIZE) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: failed\n",
			   __func__, __LINE__);
		return 0;
	}

	return 1;
}
/* find a good dqs enable to use */
static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
{
	uint32_t v, d, p, i;
	uint32_t max_working_cnt;
	uint32_t bit_chk;
	uint32_t dtaps_per_ptap;
	uint32_t work_bgn, work_mid, work_end;
	uint32_t found_passing_read, found_failing_read, initial_failing_dtap;

	debug("%s:%d %u\n", __func__, __LINE__, grp);

	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);

	/* ************************************************************** */
	/* * Step 0 : Determine number of delay taps for each phase tap * */
	dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	/* ********************************************************* */
	/* * Step 1 : First push vfifo until we get a failing read * */
	v = find_vfifo_read(grp, &bit_chk);

	max_working_cnt = 0;

	/* ******************************************************** */
	/* * step 2: find first working phase, increment in ptaps * */
	work_bgn = 0;
	if (find_working_phase(&grp, &bit_chk, dtaps_per_ptap, &work_bgn, &v, &d,
			       &p, &i, &max_working_cnt) == 0)
		return 0;

	work_end = work_bgn;
	/*
	 * If d is 0 then the working window covers a phase tap and we can
	 * follow the old procedure; otherwise, we've found the beginning,
	 * and we need to increment the dtaps until we find the end.
	 */
	if (d == 0) {
		/* ********************************************************* */
		/* * step 3a: if we have room, back off by one and
		     increment in dtaps * */
		sdr_backup_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
				 &max_working_cnt);

		/* ********************************************************* */
		/* * step 4a: go forward from working phase to non working
		     phase, increment in ptaps * */
		if (sdr_nonworking_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
					 &i, &max_working_cnt, &work_end) == 0)
			return 0;

		/* ********************************************************* */
		/* * step 5a: back off one from last, increment in dtaps * */

		/* Special case code for backing up a phase */
		if (p == 0) {
			p = IO_DQS_EN_PHASE_MAX;
			rw_mgr_decr_vfifo(grp, &v);
		} else {
			p = p - 1;
		}

		work_end -= IO_DELAY_PER_OPA_TAP;
		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

		/* * The actual increment of dtaps is done outside of
		     the if/else loop to share code * */
		d = 0;

		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: vfifo=%u ptap=%u\n",
			   __func__, __LINE__, v, p);
	} else {
		/* ******************************************************* */
		/* * step 3-5b: Find the right edge of the window using
		     delay taps * */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo=%u ptap=%u dtap=%u bgn=%u\n",
			   __func__, __LINE__, v, p, d, work_bgn);

		work_end = work_bgn;

		/* * The actual increment of dtaps is done outside of the
		     if/else loop to share code * */

		/* Only here to counterbalance a subtract later on which is
		   not needed if this branch of the algorithm is taken */
		max_working_cnt++;
	}
	/* The dtap increment to find the failing edge is done here */
	for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end +=
	     IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: dtap=%u\n",
			   __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							      PASS_ONE_BIT,
							      &bit_chk, 0))
			break;
	}

	/* Go back to working dtap */
	if (d != 0)
		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%u ptap=%u dtap=%u end=%u\n",
		   __func__, __LINE__, v, p, d - 1, work_end);

	if (work_end < work_bgn) {
		/* nil range */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: failed\n",
			   __func__, __LINE__);
		return 0;
	}

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n",
		   __func__, __LINE__, work_bgn, work_end);
	/* *************************************************************** */
	/*
	 * * We need to calculate the number of dtaps that equal a ptap.
	 * * To do that we'll back up a ptap and re-find the edge of the
	 * * window using dtaps
	 */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap for tracking\n",
		   __func__, __LINE__);

	/* Special case code for backing up a phase */
	if (p == 0) {
		p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp, &v);
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup cycle/phase: v=%u p=%u\n",
			   __func__, __LINE__, v, p);
	} else {
		p = p - 1;
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup phase only: v=%u p=%u",
			   __func__, __LINE__, v, p);
	}

	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

	/*
	 * Increase dtap until we first see a passing read (in case the
	 * window is smaller than a ptap), and then a failing read to
	 * mark the edge of the window again.
	 */
	/* Find a passing read */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n",
		   __func__, __LINE__);
	found_passing_read = 0;
	found_failing_read = 0;
	initial_failing_dtap = d;
	for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing read d=%u\n",
			   __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							     PASS_ONE_BIT,
							     &bit_chk, 0)) {
			found_passing_read = 1;
			break;
		}
	}

	if (found_passing_read) {
		/* Find a failing read */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing read\n",
			   __func__, __LINE__);
		for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
			debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing read d=%u\n",
				   __func__, __LINE__, d);
			scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

			if (!rw_mgr_mem_calibrate_read_test_all_ranks
				(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
				found_failing_read = 1;
				break;
			}
		}
	} else {
		debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to calculate dtaps",
			   __func__, __LINE__);
		debug_cond(DLEVEL == 1, "per ptap. Fall back on static value\n");
	}
	/*
	 * The dynamically calculated dtaps_per_ptap is only valid if we
	 * found a passing/failing read. If we didn't, it means d hit the max
	 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
	 * statically calculated value.
	 */
	if (found_passing_read && found_failing_read)
		dtaps_per_ptap = d - initial_failing_dtap;

	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u - %u = %u",
		   __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);

	/* ******************************************** */
	/* * step 6: Find the centre of the window * */
	if (sdr_find_window_centre(&grp, &bit_chk, &work_bgn, &v, &d, &p,
				   &work_mid, &work_end) == 0)
		return 0;

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: vfifo=%u ptap=%u dtap=%u\n",
		   __func__, __LINE__, v, p - 1, d);
	return 1;
}
/*
 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
 * dq_in_delay values
 */
static uint32_t
rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
(uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
{
	uint32_t found;
	uint32_t i;
	uint32_t p;
	uint32_t d;
	uint32_t r;

	/* we start at zero, so have one less dq to divide among */
	const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
				    (RW_MGR_MEM_DQ_PER_READ_DQS - 1);

	debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group,
	      test_bgn);

	/* try different dq_in_delays since the dq path is shorter than dqs */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++, d += delay_step) {
			debug_cond(DLEVEL == 1,
				   "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay: g=%u/%u r=%u, i=%u p=%u d=%u\n",
				   __func__, __LINE__, write_group, read_group,
				   r, i, p, d);
			scc_mgr_set_dq_in_delay(p, d);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);

	debug_cond(DLEVEL == 1,
		   "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay: g=%u/%u found=%u; Resetting delay chain to zero\n",
		   __func__, __LINE__, write_group, read_group, found);

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++) {
			scc_mgr_set_dq_in_delay(p, 0);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	return found;
}
/* per-bit deskew DQ and center */
static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
	uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
	uint32_t use_read_test, uint32_t update_fom)
{
	uint32_t i, p, d, min_index;
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	uint32_t bit_chk;
	uint32_t sticky_bit_chk;
	int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t mid;
	int32_t orig_mid_min, mid_min;
	int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
		final_dqs_en;
	int32_t dq_margin, dqs_margin;
	uint32_t stop;
	uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
	uint32_t addr;

	debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);

	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;
	start_dqs = readl(addr + (read_group << 2));
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
		start_dqs_en = readl(addr + ((read_group << 2)
				     - IO_DQS_EN_DELAY_OFFSET));
	/* set the left and right edge of each bit to an illegal value */
	/* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
	sticky_bit_chk = 0;
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		left_edge[i] = IO_IO_IN_DELAY_MAX + 1;
		right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
	}

	/* Search for the left edge of the window for each bit */
	for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
		scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (use_read_test) {
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
				&bit_chk, 0, 0);
		} else {
			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
							0, PASS_ONE_BIT,
							&bit_chk, 0);
			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (bit_chk == 0);
		}
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->read_correct_mask);
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => %u == %u && %u",
			   __func__, __LINE__, d,
			   sticky_bit_chk, param->read_correct_mask, stop);

		if (stop == 1)
			break;

		for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
			if (bit_chk & 1) {
				/*
				 * Remember a passing test as the
				 * left_edge.
				 */
				left_edge[i] = d;
			} else {
				/*
				 * If a left edge has not been seen yet,
				 * then a future passing test will mark
				 * this edge as the right edge.
				 */
				if (left_edge[i] == IO_IO_IN_DELAY_MAX + 1)
					right_edge[i] = -(d + 1);
			}
			bit_chk = bit_chk >> 1;
		}
	}
	/* Reset DQ delay chains to 0 */
	scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, 0);
	sticky_bit_chk = 0;
	for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
			   __func__, __LINE__,
			   i, left_edge[i], i, right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) &&
		    (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
			right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
			debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
				   __func__, __LINE__, i, right_edge[i]);
		}

		/*
		 * Reset sticky bit (except for bits where we have seen
		 * both the left and right edge).
		 */
		sticky_bit_chk = sticky_bit_chk << 1;
		if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) &&
		    (right_edge[i] != IO_IO_IN_DELAY_MAX + 1))
			sticky_bit_chk = sticky_bit_chk | 1;

		if (i == 0)
			break;
	}
	/* Search for the right edge of the window for each bit */
	for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) {
		scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
		if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
			uint32_t delay = d + start_dqs_en;
			if (delay > IO_DQS_EN_DELAY_MAX)
				delay = IO_DQS_EN_DELAY_MAX;
			scc_mgr_set_dqs_en_delay(read_group, delay);
		}
		scc_mgr_load_dqs(read_group);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (use_read_test) {
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
				&bit_chk, 0, 0);
		} else {
			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
							0, PASS_ONE_BIT,
							&bit_chk, 0);
			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (bit_chk == 0);
		}
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->read_correct_mask);

		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => %u == %u && %u",
			   __func__, __LINE__, d,
			   sticky_bit_chk, param->read_correct_mask, stop);
		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
				if (bit_chk & 1) {
					/*
					 * Remember a passing test as
					 * the right_edge.
					 */
					right_edge[i] = d;
				} else if (d != 0) {
					/*
					 * If a right edge has not been
					 * seen yet, then a future passing
					 * test will mark this edge as the
					 * left edge.
					 */
					if (right_edge[i] ==
					    IO_IO_IN_DELAY_MAX + 1)
						left_edge[i] = -(d + 1);
				} else {
					/*
					 * d = 0 failed, but it passed
					 * when testing the left edge,
					 * so it must be marginal; set
					 * it to -1.
					 */
					if (right_edge[i] ==
					    IO_IO_IN_DELAY_MAX + 1 &&
					    left_edge[i] !=
					    IO_IO_IN_DELAY_MAX + 1)
						right_edge[i] = -1;
					/*
					 * If a right edge has not been
					 * seen yet, then a future passing
					 * test will mark this edge as the
					 * left edge.
					 */
					else if (right_edge[i] ==
						 IO_IO_IN_DELAY_MAX + 1)
						left_edge[i] = -(d + 1);
				}

				debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,d=%u]: ",
					   __func__, __LINE__, d);
				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ",
					   (int)(bit_chk & 1), i, left_edge[i]);
				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
					   right_edge[i]);
				bit_chk = bit_chk >> 1;
			}
		}
	}
	/* Check that all bits have a window */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d",
			   __func__, __LINE__,
			   i, left_edge[i], i, right_edge[i]);
		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) ||
		    (right_edge[i] == IO_IO_IN_DELAY_MAX + 1)) {
			/*
			 * Restore delay chain settings before letting the loop
			 * in rw_mgr_mem_calibrate_vfifo retry different
			 * dqs/ck relationships.
			 */
			scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
			if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
				scc_mgr_set_dqs_en_delay(read_group,
							 start_dqs_en);
			}
			scc_mgr_load_dqs(read_group);
			writel(0, &sdr_scc_mgr->update);

			debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to find edge [%u]: %d %d",
				   __func__, __LINE__,
				   i, left_edge[i], right_edge[i]);
			if (use_read_test) {
				set_failing_group_stage(read_group *
					RW_MGR_MEM_DQ_PER_READ_DQS + i,
					CAL_STAGE_VFIFO,
					CAL_SUBSTAGE_VFIFO_CENTER);
			} else {
				set_failing_group_stage(read_group *
					RW_MGR_MEM_DQ_PER_READ_DQS + i,
					CAL_STAGE_VFIFO_AFTER_WRITES,
					CAL_SUBSTAGE_VFIFO_CENTER);
			}
			return 0;
		}
	}
2098 /* Find middle of window for each DQ bit */
2099 mid_min = left_edge[0] - right_edge[0];
2101 for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
2102 mid = left_edge[i] - right_edge[i];
2103 if (mid < mid_min) {
2110 * -mid_min/2 represents the amount that we need to move DQS.
2111 * If mid_min is odd and positive we'll need to add one to
2112 * make sure the rounding in further calculations is correct
2113 * (always bias to the right), so just add 1 for all positive values.
2118 mid_min = mid_min / 2;
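/*
 * Worked example for the bias described above (illustrative numbers
 * only): a raw mid_min of 5 first becomes 6 ("add 1 for all positive
 * values") and then 6 / 2 = 3, instead of 5 / 2 = 2, so the DQS move
 * always rounds to the right.
 */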
2120 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
2121 __func__, __LINE__, mid_min, min_index);
2123 /* Determine the amount we can change DQS (which is -mid_min) */
2124 orig_mid_min = mid_min;
2125 new_dqs = start_dqs - mid_min;
2126 if (new_dqs > IO_DQS_IN_DELAY_MAX)
2127 new_dqs = IO_DQS_IN_DELAY_MAX;
2128 else if (new_dqs < 0)
2131 mid_min = start_dqs - new_dqs;
2132 debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
2135 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2136 if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
2137 mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
2138 else if (start_dqs_en - mid_min < 0)
2139 mid_min += start_dqs_en - mid_min;
2141 new_dqs = start_dqs - mid_min;
2143 debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d \
2144 new_dqs=%d mid_min=%d\n", start_dqs,
2145 IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
2148 /* Initialize data for export structures */
2149 dqs_margin = IO_IO_IN_DELAY_MAX + 1;
2150 dq_margin = IO_IO_IN_DELAY_MAX + 1;
2152 /* add delay to bring centre of all DQ windows to the same "level" */
2153 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
2154 /* Use values before divide by 2 to reduce round off error */
2155 shift_dq = (left_edge[i] - right_edge[i] -
2156 (left_edge[min_index] - right_edge[min_index]))/2 +
2157 (orig_mid_min - mid_min);
2159 debug_cond(DLEVEL == 2, "vfifo_center: before: \
2160 shift_dq[%u]=%d\n", i, shift_dq);
2162 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET;
2163 temp_dq_in_delay1 = readl(addr + (p << 2));
2164 temp_dq_in_delay2 = readl(addr + (i << 2));
2166 if (shift_dq + (int32_t)temp_dq_in_delay1 >
2167 (int32_t)IO_IO_IN_DELAY_MAX) {
2168 shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay2;
2169 } else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) {
2170 shift_dq = -(int32_t)temp_dq_in_delay1;
2172 debug_cond(DLEVEL == 2, "vfifo_center: after: \
2173 shift_dq[%u]=%d\n", i, shift_dq);
2174 final_dq[i] = temp_dq_in_delay1 + shift_dq;
2175 scc_mgr_set_dq_in_delay(p, final_dq[i]);
2178 debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i,
2179 left_edge[i] - shift_dq + (-mid_min),
2180 right_edge[i] + shift_dq - (-mid_min));
2181 /* To determine values for export structures */
2182 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
2183 dq_margin = left_edge[i] - shift_dq + (-mid_min);
2185 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
2186 dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2189 final_dqs = new_dqs;
2190 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
2191 final_dqs_en = start_dqs_en - mid_min;
2194 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2195 scc_mgr_set_dqs_en_delay(read_group, final_dqs_en);
2196 scc_mgr_load_dqs(read_group);
2200 scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs);
2201 scc_mgr_load_dqs(read_group);
2202 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d \
2203 dqs_margin=%d", __func__, __LINE__,
2204 dq_margin, dqs_margin);
2207 * Do not remove this line as it makes sure all of our decisions
2208 * have been applied. Apply the update bit.
2210 writel(0, &sdr_scc_mgr->update);
2212 return (dq_margin >= 0) && (dqs_margin >= 0);
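/*
 * Minimal sketch (not called by the sequencer) of the per-bit centring
 * clamp used above: shift one DQ input delay so its window centre lines
 * up, while keeping the resulting delay inside [0, IO_IO_IN_DELAY_MAX].
 * "cur_delay" stands in for the value read back from the SCC manager.
 */
static inline int32_t center_shift_clamp_sketch(int32_t shift_dq,
	int32_t cur_delay)
{
	if (shift_dq + cur_delay > (int32_t)IO_IO_IN_DELAY_MAX)
		shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - cur_delay;
	else if (shift_dq + cur_delay < 0)
		shift_dq = -cur_delay;
	return shift_dq;
}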
2216 * calibrate the read valid prediction FIFO.
2218 * - read valid prediction will consist of finding a good DQS enable phase,
2219 * DQS enable delay, DQS input phase, and DQS input delay.
2220 * - we also do a per-bit deskew on the DQ lines.
2222 static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group,
2225 uint32_t p, d, rank_bgn, sr;
2226 uint32_t dtaps_per_ptap;
2229 uint32_t grp_calibrated;
2230 uint32_t write_group, write_test_bgn;
2231 uint32_t failed_substage;
2233 debug("%s:%d: %u %u\n", __func__, __LINE__, read_group, test_bgn);
2235 /* update info for sims */
2236 reg_file_set_stage(CAL_STAGE_VFIFO);
2238 write_group = read_group;
2239 write_test_bgn = test_bgn;
2241 /* USER Determine number of delay taps for each phase tap */
2244 while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
2246 tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
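/*
 * The loop above effectively computes
 * dtaps_per_ptap = CEIL(IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP);
 * e.g. with illustrative values of 416 ps per ptap and 25 ps per dtap
 * it would yield 17 dtaps per ptap.
 */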
2251 /* update info for sims */
2252 reg_file_set_group(read_group);
2256 reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
2257 failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
2259 for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) {
2261 * In RLDRAMX we may be messing with the delay of pins in
2262 * the same write group but outside of the current read
2263 * group, but that's OK because we haven't calibrated the
2264 * output side yet.
2267 scc_mgr_apply_group_all_out_delay_add_all_ranks
2268 (write_group, write_test_bgn, d);
2271 for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0;
2273 /* set a particular dqdqs phase */
2274 scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p);
2276 debug_cond(DLEVEL == 1, "%s:%d calibrate_vfifo: g=%u \
2277 p=%u d=%u\n", __func__, __LINE__,
2281 * Load up the patterns used by read calibration
2282 * using current DQDQS phase.
2284 rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2285 if (!(gbl->phy_debug_mode_flags &
2286 PHY_DEBUG_DISABLE_GUARANTEED_READ)) {
2287 if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks
2288 (read_group, 1, &bit_chk)) {
2289 debug_cond(DLEVEL == 1, "%s:%d Guaranteed read test failed:",
2290 __func__, __LINE__);
2291 debug_cond(DLEVEL == 1, " g=%u p=%u d=%u\n",
2299 if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
2300 (write_group, read_group, test_bgn)) {
2302 * USER Read per-bit deskew can be done on a
2303 * per shadow register basis.
2305 for (rank_bgn = 0, sr = 0;
2306 rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2307 rank_bgn += NUM_RANKS_PER_SHADOW_REG,
2310 * Determine if this set of ranks
2311 * should be skipped entirely.
2313 if (!param->skip_shadow_regs[sr]) {
2315 * If doing read after write
2316 * calibration, do not update
2317 * FOM, now - do it then.
2319 if (!rw_mgr_mem_calibrate_vfifo_center
2320 (rank_bgn, write_group,
2321 read_group, test_bgn, 1, 0)) {
2324 CAL_SUBSTAGE_VFIFO_CENTER;
2330 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
2335 if (grp_calibrated == 0) {
2336 set_failing_group_stage(write_group, CAL_STAGE_VFIFO,
2342 * Reset the delay chains back to zero if they have moved > 1
2343 * (check for > 1 because the loop increments d even on a pass).
2347 scc_mgr_zero_group(write_group, write_test_bgn, 1);
2352 /* VFIFO Calibration -- Read Deskew Calibration after write deskew */
2353 static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
2356 uint32_t rank_bgn, sr;
2357 uint32_t grp_calibrated;
2358 uint32_t write_group;
2360 debug("%s:%d %u %u\n", __func__, __LINE__, read_group, test_bgn);
2362 /* update info for sims */
2364 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
2365 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
2367 write_group = read_group;
2369 /* update info for sims */
2370 reg_file_set_group(read_group);
2373 /* Read per-bit deskew can be done on a per shadow register basis */
2374 for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2375 rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
2376 /* Determine if this set of ranks should be skipped entirely */
2377 if (!param->skip_shadow_regs[sr]) {
2378 /* This is the last calibration round, update FOM here */
2379 if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
2390 if (grp_calibrated == 0) {
2391 set_failing_group_stage(write_group,
2392 CAL_STAGE_VFIFO_AFTER_WRITES,
2393 CAL_SUBSTAGE_VFIFO_CENTER);
2400 /* Calibrate LFIFO to find smallest read latency */
2401 static uint32_t rw_mgr_mem_calibrate_lfifo(void)
2406 debug("%s:%d\n", __func__, __LINE__);
2408 /* update info for sims */
2409 reg_file_set_stage(CAL_STAGE_LFIFO);
2410 reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
2412 /* Load up the patterns used by read calibration for all ranks */
2413 rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2417 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2418 debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
2419 __func__, __LINE__, gbl->curr_read_lat);
2421 if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
2429 /* reduce read latency and see if things are working */
2431 gbl->curr_read_lat--;
2432 } while (gbl->curr_read_lat > 0);
2434 /* reset the fifos to get pointers to known state */
2436 writel(0, &phy_mgr_cmd->fifo_reset);
2439 /* add a fudge factor to the read latency that was determined */
2440 gbl->curr_read_lat += 2;
2441 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2442 debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \
2443 read_lat=%u\n", __func__, __LINE__,
2444 gbl->curr_read_lat);
2447 set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
2448 CAL_SUBSTAGE_READ_LATENCY);
2450 debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \
2451 read_lat=%u\n", __func__, __LINE__,
2452 gbl->curr_read_lat);
2458 * Issue a write test command.
2459 * Two variants are provided: one that just tests a write pattern,
2460 * and another that tests data-mask functionality.
2462 static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
2465 uint32_t mcc_instruction;
2466 uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
2467 ENABLE_SUPER_QUICK_CALIBRATION);
2468 uint32_t rw_wl_nop_cycles;
2472 * Set counter and jump addresses for the right
2473 * number of NOP cycles.
2474 * The number of supported NOP cycles can range from -1 to infinity
2475 * Three different cases are handled:
2477 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
2478 * mechanism will be used to insert the right number of NOPs
2480 * 2. For a number of NOP cycles equal to 0, the micro-instruction
2481 * issuing the write command will jump straight to the
2482 * micro-instruction that turns on DQS (for DDRx), or outputs write
2483 * data (for RLD), skipping
2484 * the NOP micro-instruction altogether
2486 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
2487 * turned on in the same micro-instruction that issues the write
2488 * command. Then we need
2489 * to directly jump to the micro-instruction that sends out the data
2491 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
2492 * (2 and 3). One jump-counter (0) is used to perform multiple
2493 * write-read operations.
2494 * One counter is left to issue this command in "multiple-group" mode.
2497 rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
2499 if (rw_wl_nop_cycles == -1) {
2501 * CNTR 2 - We want to execute the special write operation that
2502 * turns on DQS right away and then skip directly to the
2503 * instruction that sends out the data. We set the counter to a
2504 * large number so that the jump is always taken.
2506 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
2508 /* CNTR 3 - Not used */
2510 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
2511 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
2512 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2513 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
2514 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2516 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
2517 writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
2518 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2519 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
2520 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2522 } else if (rw_wl_nop_cycles == 0) {
2524 * CNTR 2 - We want to skip the NOP operation and go straight
2525 * to the DQS enable instruction. We set the counter to a large
2526 * number so that the jump is always taken.
2528 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
2530 /* CNTR 3 - Not used */
2532 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
2533 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
2534 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2536 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
2537 writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
2538 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2542 * CNTR 2 - In this case we want to execute the next instruction
2543 * and NOT take the jump. So we set the counter to 0. The jump
2544 * address doesn't count.
2546 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
2547 writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2550 * CNTR 3 - Set the nop counter to the number of cycles we
2551 * need to loop for, minus 1.
2553 writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
2555 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
2556 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
2557 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2559 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
2560 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
2561 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2565 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
2566 RW_MGR_RESET_READ_DATAPATH_OFFSET);
2568 if (quick_write_mode)
2569 writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
2571 writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
2573 writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
2576 * CNTR 1 - This is used to ensure enough time elapses
2577 * for read data to come back.
2579 writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
2582 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
2583 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
2585 writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
2586 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
2589 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
2590 writel(mcc_instruction, addr + (group << 2));
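/*
 * Sketch of the jump-counter convention assumed above (illustrative
 * only): a counter value of N makes the RW manager take the associated
 * jump N times before falling through, so 0xFF approximates "always
 * jump" and 0 means "never jump, execute the next instruction".
 */
static inline void rw_mgr_jump_sketch(uint32_t count, uint32_t jump_addr)
{
	writel(count, &sdr_rw_load_mgr_regs->load_cntr2);
	writel(jump_addr, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
}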
2593 /* Test writes, can check for a single bit pass or multiple bit pass */
2594 static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
2595 uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
2596 uint32_t *bit_chk, uint32_t all_ranks)
2599 uint32_t correct_mask_vg;
2600 uint32_t tmp_bit_chk;
2602 uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
2603 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
2604 uint32_t addr_rw_mgr;
2605 uint32_t base_rw_mgr;
2607 *bit_chk = param->write_correct_mask;
2608 correct_mask_vg = param->write_correct_mask_vg;
2610 for (r = rank_bgn; r < rank_end; r++) {
2611 if (param->skip_ranks[r]) {
2612 /* request to skip the rank */
2617 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
2620 addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS;
2621 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) {
2622 /* reset the fifos to get pointers to known state */
2623 writel(0, &phy_mgr_cmd->fifo_reset);
2625 tmp_bit_chk = tmp_bit_chk <<
2626 (RW_MGR_MEM_DQ_PER_WRITE_DQS /
2627 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
2628 rw_mgr_mem_calibrate_write_test_issue(write_group *
2629 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg,
2632 base_rw_mgr = readl(addr_rw_mgr);
2633 tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
2637 *bit_chk &= tmp_bit_chk;
2641 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
2642 debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == \
2643 %u => %lu", write_group, use_dm,
2644 *bit_chk, param->write_correct_mask,
2645 (long unsigned int)(*bit_chk ==
2646 param->write_correct_mask));
2647 return *bit_chk == param->write_correct_mask;
2649 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
2650 debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ",
2651 write_group, use_dm, *bit_chk);
2652 debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0,
2653 (long unsigned int)(*bit_chk != 0));
2654 return *bit_chk != 0x00;
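/*
 * Minimal sketch of the per-virtual-group accumulation in the test
 * above: each virtual group contributes its slice of bits from the
 * bottom while earlier slices shift up, and a status bit set in the RW
 * manager appears to mean "error", hence the inversion before masking.
 */
static inline uint32_t write_test_accumulate_sketch(uint32_t tmp_bit_chk,
	uint32_t base_rw_mgr, uint32_t correct_mask_vg)
{
	tmp_bit_chk <<= RW_MGR_MEM_DQ_PER_WRITE_DQS /
			RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS;
	return tmp_bit_chk | (correct_mask_vg & ~base_rw_mgr);
}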
2659 * Center all windows. Do per-bit deskew to possibly increase the size of certain windows. */
2662 static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
2663 uint32_t write_group, uint32_t test_bgn)
2665 uint32_t i, p, min_index;
2668 * Store these as signed, since there are comparisons with signed numbers. */
2672 uint32_t sticky_bit_chk;
2673 int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2674 int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2676 int32_t mid_min, orig_mid_min;
2677 int32_t new_dqs, start_dqs, shift_dq;
2678 int32_t dq_margin, dqs_margin, dm_margin;
2680 uint32_t temp_dq_out1_delay;
2683 debug("%s:%d %u %u\n", __func__, __LINE__, write_group, test_bgn);
2687 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
2688 start_dqs = readl(addr +
2689 (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
2691 /* per-bit deskew */
2694 * set the left and right edge of each bit to an illegal value
2695 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
2698 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2699 left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2700 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2703 /* Search for the left edge of the window for each bit */
2704 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) {
2705 scc_mgr_apply_group_dq_out1_delay(write_group, d);
2707 writel(0, &sdr_scc_mgr->update);
2710 * Stop searching when the write test doesn't pass AND when
2711 * we've seen a passing write on every bit.
2713 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
2714 0, PASS_ONE_BIT, &bit_chk, 0);
2715 sticky_bit_chk = sticky_bit_chk | bit_chk;
2716 stop = stop && (sticky_bit_chk == param->write_correct_mask);
2717 debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => %u \
2718 == %u && %u [bit_chk= %u ]\n",
2719 d, sticky_bit_chk, param->write_correct_mask,
2725 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2728 * Remember a passing test as the left_edge.
2734 * If a left edge has not been seen
2735 * yet, then a future passing test will
2736 * mark this edge as the right edge.
2739 IO_IO_OUT1_DELAY_MAX + 1) {
2740 right_edge[i] = -(d + 1);
2743 debug_cond(DLEVEL == 2, "write_center[l,d=%d]:", d);
2744 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
2745 (int)(bit_chk & 1), i, left_edge[i]);
2746 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2748 bit_chk = bit_chk >> 1;
2753 /* Reset DQ delay chains to 0 */
2754 scc_mgr_apply_group_dq_out1_delay(write_group, 0);
2756 for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) {
2757 debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
2758 %d right_edge[%u]: %d\n", __func__, __LINE__,
2759 i, left_edge[i], i, right_edge[i]);
2762 * Check for cases where we haven't found the left edge,
2763 * which makes our assignment of the right edge invalid.
2764 * Reset it to the illegal value.
2766 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) &&
2767 (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) {
2768 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2769 debug_cond(DLEVEL == 2, "%s:%d write_center: reset \
2770 right_edge[%u]: %d\n", __func__, __LINE__,
2775 * Reset sticky bit (except for bits where we have
2776 * seen the left edge).
2778 sticky_bit_chk = sticky_bit_chk << 1;
2779 if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1))
2780 sticky_bit_chk = sticky_bit_chk | 1;
2786 /* Search for the right edge of the window for each bit */
2787 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) {
2788 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2791 writel(0, &sdr_scc_mgr->update);
2794 * Stop searching when the write test doesn't pass AND when
2795 * we've seen a passing write on every bit.
2797 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
2798 0, PASS_ONE_BIT, &bit_chk, 0);
2800 sticky_bit_chk = sticky_bit_chk | bit_chk;
2801 stop = stop && (sticky_bit_chk == param->write_correct_mask);
2803 debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => %u == \
2804 %u && %u\n", d, sticky_bit_chk,
2805 param->write_correct_mask, stop);
2809 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS;
2811 /* d = 0 failed, but it passed when
2812 testing the left edge, so it must be
2813 marginal, set it to -1 */
2814 if (right_edge[i] ==
2815 IO_IO_OUT1_DELAY_MAX + 1 &&
2817 IO_IO_OUT1_DELAY_MAX + 1) {
2824 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2827 * Remember a passing test as the right_edge.
2834 * If a right edge has not
2835 * been seen yet, then a future
2836 * passing test will mark this
2837 * edge as the left edge.
2839 if (right_edge[i] ==
2840 IO_IO_OUT1_DELAY_MAX + 1)
2841 left_edge[i] = -(d + 1);
2844 * d = 0 failed, but it passed
2845 * when testing the left edge,
2846 * so it must be marginal, set it to -1.
2849 if (right_edge[i] ==
2850 IO_IO_OUT1_DELAY_MAX + 1 &&
2852 IO_IO_OUT1_DELAY_MAX + 1)
2855 * If a right edge has not been
2856 * seen yet, then a future
2857 * passing test will mark this
2858 * edge as the left edge.
2860 else if (right_edge[i] ==
2861 IO_IO_OUT1_DELAY_MAX +
2863 left_edge[i] = -(d + 1);
2866 debug_cond(DLEVEL == 2, "write_center[r,d=%d]:", d);
2867 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
2868 (int)(bit_chk & 1), i, left_edge[i]);
2869 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2871 bit_chk = bit_chk >> 1;
2876 /* Check that all bits have a window */
2877 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2878 debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
2879 %d right_edge[%u]: %d", __func__, __LINE__,
2880 i, left_edge[i], i, right_edge[i]);
2881 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) ||
2882 (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) {
2883 set_failing_group_stage(test_bgn + i,
2885 CAL_SUBSTAGE_WRITES_CENTER);
2890 /* Find middle of window for each DQ bit */
2891 mid_min = left_edge[0] - right_edge[0];
2893 for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2894 mid = left_edge[i] - right_edge[i];
2895 if (mid < mid_min) {
2902 * -mid_min/2 represents the amount that we need to move DQS.
2903 * If mid_min is odd and positive we'll need to add one to
2904 * make sure the rounding in further calculations is correct
2905 * (always bias to the right), so just add 1 for all positive values.
2909 mid_min = mid_min / 2;
2910 debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
2913 /* Determine the amount we can change DQS (which is -mid_min) */
2914 orig_mid_min = mid_min;
2915 new_dqs = start_dqs;
2917 debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d \
2918 mid_min=%d\n", __func__, __LINE__, start_dqs, new_dqs, mid_min);
2919 /* Initialize data for export structures */
2920 dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
2921 dq_margin = IO_IO_OUT1_DELAY_MAX + 1;
2923 /* add delay to bring centre of all DQ windows to the same "level" */
2924 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
2925 /* Use values before divide by 2 to reduce round off error */
2926 shift_dq = (left_edge[i] - right_edge[i] -
2927 (left_edge[min_index] - right_edge[min_index]))/2 +
2928 (orig_mid_min - mid_min);
2930 debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq \
2931 [%u]=%d\n", __func__, __LINE__, i, shift_dq);
2933 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
2934 temp_dq_out1_delay = readl(addr + (i << 2));
2935 if (shift_dq + (int32_t)temp_dq_out1_delay >
2936 (int32_t)IO_IO_OUT1_DELAY_MAX) {
2937 shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay;
2938 } else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
2939 shift_dq = -(int32_t)temp_dq_out1_delay;
2941 debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n",
2943 scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq);
2946 debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
2947 left_edge[i] - shift_dq + (-mid_min),
2948 right_edge[i] + shift_dq - (-mid_min));
2949 /* To determine values for export structures */
2950 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
2951 dq_margin = left_edge[i] - shift_dq + (-mid_min);
2953 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
2954 dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2958 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
2959 writel(0, &sdr_scc_mgr->update);
2962 debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);
2965 * set the left and right edge of each bit to an illegal value,
2966 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value,
2968 left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
2969 right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
2970 int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2971 int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2972 int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
2973 int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
2974 int32_t win_best = 0;
2976 /* Search for the window (or part of it) by shifting DM */
2977 for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
2978 scc_mgr_apply_group_dm_out1_delay(write_group, d);
2979 writel(0, &sdr_scc_mgr->update);
2981 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
2982 PASS_ALL_BITS, &bit_chk,
2984 /* USER Set current end of the window */
2987 * If a starting edge of our window has not been seen
2988 * this is our current start of the DM window.
2990 if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
2994 * If the current window is bigger than the best seen,
2995 * record it as the best.
2997 if ((end_curr-bgn_curr+1) > win_best) {
2998 win_best = end_curr-bgn_curr+1;
2999 bgn_best = bgn_curr;
3000 end_best = end_curr;
3003 /* We just saw a failing test. Reset temp edge */
3004 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3005 end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3010 /* Reset DM delay chains to 0 */
3011 scc_mgr_apply_group_dm_out1_delay(write_group, 0);
3014 * Check to see if the current window nudges up against 0 delay.
3015 * If so, we need to continue the search by shifting DQS;
3016 * otherwise the DQS search begins as a new search. */
3017 if (end_curr != 0) {
3018 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3019 end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3022 /* Search for the window (or part of it) by shifting DQS */
3023 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
3025 * Note: This only shifts DQS, so we may be limiting ourselves to
3026 * the width of DQ unnecessarily.
3028 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
3031 writel(0, &sdr_scc_mgr->update);
3032 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
3033 PASS_ALL_BITS, &bit_chk,
3035 /* USER Set current end of the window */
3038 * If a beginning edge of our window has not been seen
3039 * this is our current beginning of the DM window.
3041 if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
3045 * If the current window is bigger than the best seen,
3046 * record it as the best.
3048 if ((end_curr-bgn_curr+1) > win_best) {
3049 win_best = end_curr-bgn_curr+1;
3050 bgn_best = bgn_curr;
3051 end_best = end_curr;
3054 /* We just saw a failing test. Reset temp edge */
3055 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3056 end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3058 /* Early exit optimization: if the remaining delay
3059 chain space is less than the largest window already seen,
3060 we can exit the search. */
3061 if (win_best - 1 >
3062 (IO_IO_OUT1_DELAY_MAX - new_dqs - d)) {
3068 /* assign left and right edge for cal and reporting */
3069 left_edge[0] = -1*bgn_best;
3070 right_edge[0] = end_best;
3072 debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
3073 __LINE__, left_edge[0], right_edge[0]);
3075 /* Move DQS (back to orig) */
3076 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
3080 /* Find middle of window for the DM bit */
3081 mid = (left_edge[0] - right_edge[0]) / 2;
3083 /* only move right, since we are not moving DQS/DQ */
3087 /* dm_margin should fail if we never find a window */
3091 dm_margin = left_edge[0] - mid;
3093 scc_mgr_apply_group_dm_out1_delay(write_group, mid);
3094 writel(0, &sdr_scc_mgr->update);
3096 debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d \
3097 dm_margin=%d\n", __func__, __LINE__, left_edge[0],
3098 right_edge[0], mid, dm_margin);
3100 gbl->fom_out += dq_margin + dqs_margin;
3102 debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d \
3103 dqs_margin=%d dm_margin=%d\n", __func__, __LINE__,
3104 dq_margin, dqs_margin, dm_margin);
3107 * Do not remove this line as it makes sure all of our
3108 * decisions have been applied.
3110 writel(0, &sdr_scc_mgr->update);
3111 return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
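/*
 * Minimal sketch (illustrative only) of the DM window bookkeeping used
 * above: a passing test opens or extends the current [bgn, end] span,
 * a failing test closes it, and the widest span seen so far is kept.
 */
static inline void dm_window_track_sketch(int32_t pos, int pass,
	int32_t *bgn_curr, int32_t *end_curr,
	int32_t *bgn_best, int32_t *end_best, int32_t *win_best)
{
	if (pass) {
		*end_curr = pos;	/* current end of the window */
		if (*bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
			*bgn_curr = pos;	/* first pass opens it */
		if (*end_curr - *bgn_curr + 1 > *win_best) {
			*win_best = *end_curr - *bgn_curr + 1;
			*bgn_best = *bgn_curr;
			*end_best = *end_curr;
		}
	} else {
		/* a failing test resets the temporary edges */
		*bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
		*end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	}
}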
3114 /* calibrate the write operations */
3115 static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
3118 /* update info for sims */
3119 debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn);
3121 reg_file_set_stage(CAL_STAGE_WRITES);
3122 reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
3124 reg_file_set_group(g);
3126 if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) {
3127 set_failing_group_stage(g, CAL_STAGE_WRITES,
3128 CAL_SUBSTAGE_WRITES_CENTER);
3135 /* precharge all banks and activate row 0 in bank "000..." and bank "111..." */
3136 static void mem_precharge_and_activate(void)
3140 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
3141 if (param->skip_ranks[r]) {
3142 /* request to skip the rank */
3147 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
3149 /* precharge all banks ... */
3150 writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3151 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3153 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
3154 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
3155 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
3157 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
3158 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
3159 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
3162 writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3163 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3167 /* Configure various memory related parameters. */
3168 static void mem_config(void)
3170 uint32_t rlat, wlat;
3171 uint32_t rw_wl_nop_cycles;
3172 uint32_t max_latency;
3174 debug("%s:%d\n", __func__, __LINE__);
3175 /* read in write and read latency */
3176 wlat = readl(&data_mgr->t_wl_add);
3177 wlat += readl(&data_mgr->mem_t_add);
3179 /* WL for hard phy does not include additive latency */
3182 * add additional write latency to offset the address/command extra
3183 * clock cycle. We change the AC mux setting, causing AC to be delayed
3184 * by one mem clock cycle. Only do this for DDR3.
3188 rlat = readl(&data_mgr->t_rl_add);
3190 rw_wl_nop_cycles = wlat - 2;
3191 gbl->rw_wl_nop_cycles = rw_wl_nop_cycles;
3194 * For AV/CV, lfifo is hardened and always runs at full rate so
3195 * max latency in AFI clocks, used here, is correspondingly smaller.
3197 max_latency = (1<<MAX_LATENCY_COUNT_WIDTH)/1 - 1;
3198 /* configure for a burst length of 8 */
3201 /* Adjust Write Latency for Hard PHY */
3204 /* set a pretty high read latency initially */
3205 gbl->curr_read_lat = rlat + 16;
3207 if (gbl->curr_read_lat > max_latency)
3208 gbl->curr_read_lat = max_latency;
3210 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3212 /* advertise write latency */
3213 gbl->curr_write_lat = wlat;
3214 writel(wlat - 2, &phy_mgr_cfg->afi_wlat);
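/*
 * Worked example for the latency set-up above (illustrative numbers
 * only): with t_wl_add = 5 and mem_t_add = 2, wlat = 7, so
 * rw_wl_nop_cycles = 7 - 2 = 5 and afi_wlat = 7 - 2 = 5; with
 * t_rl_add = 5 the initial read latency starts at 5 + 16 = 21,
 * capped at max_latency.
 */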
3216 /* initialize bit slips */
3217 mem_precharge_and_activate();
3220 /* Set VFIFO and LFIFO to instant-on settings in skip calibration mode */
3221 static void mem_skip_calibrate(void)
3223 uint32_t vfifo_offset;
3226 debug("%s:%d\n", __func__, __LINE__);
3227 /* Need to update every shadow register set used by the interface */
3228 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
3229 r += NUM_RANKS_PER_SHADOW_REG) {
3231 * Set output phase alignment settings appropriate for skip calibration.
3234 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3235 scc_mgr_set_dqs_en_phase(i, 0);
3236 #if IO_DLL_CHAIN_LENGTH == 6
3237 scc_mgr_set_dqdqs_output_phase(i, 6);
3239 scc_mgr_set_dqdqs_output_phase(i, 7);
3244 * Write data arrives to the I/O two cycles before write
3245 * latency is reached (720 deg).
3246 * -> due to bit-slip in a/c bus
3247 * -> to allow board skew where dqs is longer than ck
3248 * -> how often can this happen!?
3249 * -> can claim back some ptaps for high freq
3250 * support if we can relax this, but i digress...
3252 * The write_clk leads mem_ck by 90 deg
3253 * The minimum ptap of the OPA is 180 deg
3254 * Each ptap has (360 / IO_DLL_CHAIN_LENGTH) deg of delay
3255 * The write_clk is always delayed by 2 ptaps
3257 * Hence, to make DQS aligned to CK, we need to delay
3259 * (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
3261 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
3262 * gives us the number of ptaps, which simplifies to:
3264 * (1.25 * IO_DLL_CHAIN_LENGTH - 2)
3266 scc_mgr_set_dqdqs_output_phase(i, (1.25 *
3267 IO_DLL_CHAIN_LENGTH - 2));
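/*
 * Worked example for the formula above: with IO_DLL_CHAIN_LENGTH = 8,
 * (720 - 90 - 180 - 2 * 45) / 45 = 8 ptaps, which matches
 * 1.25 * 8 - 2 = 8.
 */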
3269 writel(0xff, &sdr_scc_mgr->dqs_ena);
3270 writel(0xff, &sdr_scc_mgr->dqs_io_ena);
3272 for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
3273 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3274 SCC_MGR_GROUP_COUNTER_OFFSET);
3276 writel(0xff, &sdr_scc_mgr->dq_ena);
3277 writel(0xff, &sdr_scc_mgr->dm_ena);
3278 writel(0, &sdr_scc_mgr->update);
3281 /* Compensate for simulation model behaviour */
3282 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3283 scc_mgr_set_dqs_bus_in_delay(i, 10);
3284 scc_mgr_load_dqs(i);
3286 writel(0, &sdr_scc_mgr->update);
3289 * ArriaV has hard FIFOs that can only be initialized by incrementing them in sequence. */
3292 vfifo_offset = CALIB_VFIFO_OFFSET;
3293 for (j = 0; j < vfifo_offset; j++) {
3294 writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
3296 writel(0, &phy_mgr_cmd->fifo_reset);
3299 * For ACV with hard lfifo, we get the skip-cal setting from
3300 * a generation-time constant.
3302 gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
3303 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3306 /* Memory calibration entry point */
3307 static uint32_t mem_calibrate(void)
3310 uint32_t rank_bgn, sr;
3311 uint32_t write_group, write_test_bgn;
3312 uint32_t read_group, read_test_bgn;
3313 uint32_t run_groups, current_run;
3314 uint32_t failing_groups = 0;
3315 uint32_t group_failed = 0;
3316 uint32_t sr_failed = 0;
3318 debug("%s:%d\n", __func__, __LINE__);
3319 /* Initialize the data settings */
3321 gbl->error_substage = CAL_SUBSTAGE_NIL;
3322 gbl->error_stage = CAL_STAGE_NIL;
3323 gbl->error_group = 0xff;
3329 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3330 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3331 SCC_MGR_GROUP_COUNTER_OFFSET);
3332 scc_set_bypass_mode(i);
3335 if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
3337 * Set VFIFO and LFIFO to instant-on settings in skip
3340 mem_skip_calibrate();
3342 for (i = 0; i < NUM_CALIB_REPEAT; i++) {
3344 * Zero all delay chain/phase settings for all
3345 * groups and all shadow register sets.
3349 run_groups = ~param->skip_groups;
3351 for (write_group = 0, write_test_bgn = 0; write_group
3352 < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
3353 write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
3354 /* Initialize the group-failure flag */
3357 current_run = run_groups & ((1 <<
3358 RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
3359 run_groups = run_groups >>
3360 RW_MGR_NUM_DQS_PER_WRITE_GROUP;
3362 if (current_run == 0)
3365 writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
3366 SCC_MGR_GROUP_COUNTER_OFFSET);
3367 scc_mgr_zero_group(write_group, write_test_bgn,
3370 for (read_group = write_group *
3371 RW_MGR_MEM_IF_READ_DQS_WIDTH /
3372 RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
3374 read_group < (write_group + 1) *
3375 RW_MGR_MEM_IF_READ_DQS_WIDTH /
3376 RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
3378 read_group++, read_test_bgn +=
3379 RW_MGR_MEM_DQ_PER_READ_DQS) {
3380 /* Calibrate the VFIFO */
3381 if (!((STATIC_CALIB_STEPS) &
3382 CALIB_SKIP_VFIFO)) {
3383 if (!rw_mgr_mem_calibrate_vfifo
3389 phy_debug_mode_flags &
3390 PHY_DEBUG_SWEEP_ALL_GROUPS)) {
3397 /* Calibrate the output side */
3398 if (group_failed == 0) {
3399 for (rank_bgn = 0, sr = 0; rank_bgn
3400 < RW_MGR_MEM_NUMBER_OF_RANKS;
3402 NUM_RANKS_PER_SHADOW_REG,
3405 if (!((STATIC_CALIB_STEPS) &
3406 CALIB_SKIP_WRITES)) {
3407 if ((STATIC_CALIB_STEPS)
3408 & CALIB_SKIP_DELAY_SWEEPS) {
3409 /* not needed in quick mode! */
3412 * Determine if this set of
3413 * ranks should be skipped entirely. */
3416 if (!param->skip_shadow_regs[sr]) {
3417 if (!rw_mgr_mem_calibrate_writes
3418 (rank_bgn, write_group,
3422 phy_debug_mode_flags &
3423 PHY_DEBUG_SWEEP_ALL_GROUPS)) {
3435 if (group_failed == 0) {
3436 for (read_group = write_group *
3437 RW_MGR_MEM_IF_READ_DQS_WIDTH /
3438 RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
3440 read_group < (write_group + 1)
3441 * RW_MGR_MEM_IF_READ_DQS_WIDTH
3442 / RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
3444 read_group++, read_test_bgn +=
3445 RW_MGR_MEM_DQ_PER_READ_DQS) {
3446 if (!((STATIC_CALIB_STEPS) &
3447 CALIB_SKIP_WRITES)) {
3448 if (!rw_mgr_mem_calibrate_vfifo_end
3449 (read_group, read_test_bgn)) {
3452 if (!(gbl->phy_debug_mode_flags
3453 & PHY_DEBUG_SWEEP_ALL_GROUPS)) {
3461 if (group_failed != 0)
3466 * USER If there are any failing groups then report the failure. */
3469 if (failing_groups != 0)
3472 /* Calibrate the LFIFO */
3473 if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) {
3475 * If we're skipping groups as part of debug,
3476 * don't calibrate LFIFO.
3478 if (param->skip_groups == 0) {
3479 if (!rw_mgr_mem_calibrate_lfifo())
3487 * Do not remove this line as it makes sure all of our decisions
3488 * have been applied.
3490 writel(0, &sdr_scc_mgr->update);
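/*
 * Sketch of the write-group to read-group mapping used throughout
 * mem_calibrate() above: write group g covers read groups
 * [g * R, (g + 1) * R), where R is the read/write DQS width ratio.
 */
static inline uint32_t first_read_group_sketch(uint32_t write_group)
{
	return write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH /
	       RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
}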
3494 static uint32_t run_mem_calibrate(void)
3497 uint32_t debug_info;
3499 debug("%s:%d\n", __func__, __LINE__);
3501 /* Reset pass/fail status shown on afi_cal_success/fail */
3502 writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
3504 /* stop tracking manager */
3505 uint32_t ctrlcfg = readl(&sdr_ctrl->ctrl_cfg);
3507 writel(ctrlcfg & 0xFFBFFFFF, &sdr_ctrl->ctrl_cfg);
3510 rw_mgr_mem_initialize();
3512 pass = mem_calibrate();
3514 mem_precharge_and_activate();
3515 writel(0, &phy_mgr_cmd->fifo_reset);
3519 * Don't return control of the PHY back to AFI when in debug mode.
3521 if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) {
3522 rw_mgr_mem_handoff();
3524 * In Hard PHY this is a 2-bit control:
3526 * 0: AFI Mux Select, 1: DDIO Mux Select
3528 writel(0x2, &phy_mgr_cfg->mux_sel);
3531 writel(ctrlcfg, &sdr_ctrl->ctrl_cfg);
3534 printf("%s: CALIBRATION PASSED\n", __FILE__);
3539 if (gbl->fom_in > 0xff)
3540 gbl->fom_in = 0xff;
3542 if (gbl->fom_out > 0xff)
3543 gbl->fom_out = 0xff;
3545 /* Update the FOM in the register file */
3546 debug_info = gbl->fom_in;
3547 debug_info |= gbl->fom_out << 8;
3548 writel(debug_info, &sdr_reg_file->fom);
3550 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3551 writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
3553 printf("%s: CALIBRATION FAILED\n", __FILE__);
3555 debug_info = gbl->error_stage;
3556 debug_info |= gbl->error_substage << 8;
3557 debug_info |= gbl->error_group << 16;
3559 writel(debug_info, &sdr_reg_file->failing_stage);
3560 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3561 writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);
3563 /* Update the failing group/stage in the register file */
3564 debug_info = gbl->error_stage;
3565 debug_info |= gbl->error_substage << 8;
3566 debug_info |= gbl->error_group << 16;
3567 writel(debug_info, &sdr_reg_file->failing_stage);
3574 * hc_initialize_rom_data() - Initialize ROM data
3576 * Initialize ROM data.
3578 static void hc_initialize_rom_data(void)
3582 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
3583 for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
3584 writel(inst_rom_init[i], addr + (i << 2));
3586 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
3587 for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
3588 writel(ac_rom_init[i], addr + (i << 2));
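/*
 * Note on the addressing above: (i << 2) converts a 32-bit word index
 * into a byte offset, so ROM entry i is written at addr + 4 * i.
 */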
3592 * initialize_reg_file() - Initialize SDR register file
3594 * Initialize SDR register file.
3596 static void initialize_reg_file(void)
3598 /* Initialize the register file with the correct data */
3599 writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
3600 writel(0, &sdr_reg_file->debug_data_addr);
3601 writel(0, &sdr_reg_file->cur_stage);
3602 writel(0, &sdr_reg_file->fom);
3603 writel(0, &sdr_reg_file->failing_stage);
3604 writel(0, &sdr_reg_file->debug1);
3605 writel(0, &sdr_reg_file->debug2);
3609 * initialize_hps_phy() - Initialize HPS PHY
3611 * Initialize HPS PHY.
3613 static void initialize_hps_phy(void)
3617 * Tracking also gets configured here because it's in the same register. */
3620 uint32_t trk_sample_count = 7500;
3621 uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
3623 * Format is number of outer loops in the 16 MSB, sample count in the 16 LSB. */
3628 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
3629 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
3630 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
3631 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
3632 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
3633 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
3635 * This field selects the intrinsic latency to RDATA_EN/FULL path.
3636 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
3638 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
3639 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
3641 writel(reg, &sdr_ctrl->phy_ctrl0);
3644 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
3646 SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
3647 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
3648 trk_long_idle_sample_count);
3649 writel(reg, &sdr_ctrl->phy_ctrl1);
3652 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
3653 trk_long_idle_sample_count >>
3654 SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
3655 writel(reg, &sdr_ctrl->phy_ctrl2);
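/*
 * Sketch of the sample-count packing assumed above: the long-idle value
 * (10 << 16) | 100 encodes 10 outer loops in the upper 16 bits and a
 * sample count of 100 in the lower 16 bits.
 */
static inline uint32_t trk_sample_pack_sketch(uint32_t outer_loops,
	uint32_t samples)
{
	return (outer_loops << 16) | samples;
}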
3658 static void initialize_tracking(void)
3660 uint32_t concatenated_longidle = 0x0;
3661 uint32_t concatenated_delays = 0x0;
3662 uint32_t concatenated_rw_addr = 0x0;
3663 uint32_t concatenated_refresh = 0x0;
3664 uint32_t trk_sample_count = 7500;
3665 uint32_t dtaps_per_ptap;
3669 * compute usable version of value in case we skip full computation later. */
3674 while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
3676 tmp_delay += IO_DELAY_PER_DCHAIN_TAP;
3680 concatenated_longidle = concatenated_longidle ^ 10;
3681 /* longidle outer loop */
3682 concatenated_longidle = concatenated_longidle << 16;
3683 concatenated_longidle = concatenated_longidle ^ 100;
3684 /* longidle sample count */
3685 concatenated_delays = concatenated_delays ^ 243;
3686 /* trfc, worst case of 933MHz 4Gb */
3687 concatenated_delays = concatenated_delays << 8;
3688 concatenated_delays = concatenated_delays ^ 14;
3689 /* trcd, worst case */
3690 concatenated_delays = concatenated_delays << 8;
3691 concatenated_delays = concatenated_delays ^ 10;
3693 concatenated_delays = concatenated_delays << 8;
3694 concatenated_delays = concatenated_delays ^ 4;
3697 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_IDLE;
3698 concatenated_rw_addr = concatenated_rw_addr << 8;
3699 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_ACTIVATE_1;
3700 concatenated_rw_addr = concatenated_rw_addr << 8;
3701 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_SGLE_READ;
3702 concatenated_rw_addr = concatenated_rw_addr << 8;
3703 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_PRECHARGE_ALL;
3705 concatenated_refresh = concatenated_refresh ^ RW_MGR_REFRESH_ALL;
3706 concatenated_refresh = concatenated_refresh << 24;
3707 concatenated_refresh = concatenated_refresh ^ 1000; /* trefi */
3709 /* Initialize the register file with the correct data */
3710 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
3711 writel(trk_sample_count, &sdr_reg_file->trk_sample_count);
3712 writel(concatenated_longidle, &sdr_reg_file->trk_longidle);
3713 writel(concatenated_delays, &sdr_reg_file->delays);
3714 writel(concatenated_rw_addr, &sdr_reg_file->trk_rw_mgr_addr);
3715 writel(RW_MGR_MEM_IF_READ_DQS_WIDTH, &sdr_reg_file->trk_read_dqs_width);
3716 writel(concatenated_refresh, &sdr_reg_file->trk_rfsh);
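/*
 * The shift-and-xor sequences above are just byte-field packing; e.g.
 * the four RW manager addresses end up as (RW_MGR_IDLE << 24) |
 * (RW_MGR_ACTIVATE_1 << 16) | (RW_MGR_SGLE_READ << 8) |
 * RW_MGR_PRECHARGE_ALL. A minimal equivalent sketch:
 */
static inline uint32_t pack_bytes_sketch(uint32_t b3, uint32_t b2,
	uint32_t b1, uint32_t b0)
{
	return (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
}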
3719 int sdram_calibration_full(void)
3721 struct param_type my_param;
3722 struct gbl_type my_gbl;
3729 /* Initialize the debug mode flags */
3730 gbl->phy_debug_mode_flags = 0;
3731 /* Set the calibration enabled by default */
3732 gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
3734 * Only sweep all groups (regardless of fail state) by default.
3735 * Enable the read test by default.
3737 #if DISABLE_GUARANTEED_READ
3738 gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
3740 /* Initialize the register file */
3741 initialize_reg_file();
3743 /* Initialize any PHY CSR */
3744 initialize_hps_phy();
3746 scc_mgr_initialize();
3748 initialize_tracking();
3750 /* USER Enable all ranks, groups */
3751 for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++)
3752 param->skip_ranks[i] = 0;
3753 for (i = 0; i < NUM_SHADOW_REGS; ++i)
3754 param->skip_shadow_regs[i] = 0;
3755 param->skip_groups = 0;
3757 printf("%s: Preparing to start memory calibration\n", __FILE__);
3759 debug("%s:%d\n", __func__, __LINE__);
3760 debug_cond(DLEVEL == 1,
3761 "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
3762 RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
3763 RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
3764 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
3765 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
3766 debug_cond(DLEVEL == 1,
3767 "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
3768 RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
3769 RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
3770 IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
3771 debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
3772 IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
3773 debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
3774 IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
3775 IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
3776 debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
3777 IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
3778 IO_IO_OUT2_DELAY_MAX);
3779 debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
3780 IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);
3782 hc_initialize_rom_data();
3784 /* update info for sims */
3785 reg_file_set_stage(CAL_STAGE_NIL);
3786 reg_file_set_group(0);
3789 * Load global needed for those actions that require
3790 * some dynamic calibration support.
3792 dyn_calib_steps = STATIC_CALIB_STEPS;
3794 * Load global to allow dynamic selection of delay loop settings
3795 * based on calibration mode.
3797 if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
3798 skip_delay_mask = 0xff;
3800 skip_delay_mask = 0x0;
3802 pass = run_mem_calibrate();
3804 printf("%s: Calibration complete\n", __FILE__);