2 * Copyright Altera Corporation (C) 2012-2015
4 * SPDX-License-Identifier: BSD-3-Clause
9 #include <asm/arch/sdram.h>
10 #include "sequencer.h"
11 #include "sequencer_auto.h"
12 #include "sequencer_auto_ac_init.h"
13 #include "sequencer_auto_inst_init.h"
14 #include "sequencer_defines.h"
16 static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
17 (struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);
19 static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
20 (struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);
22 static struct socfpga_sdr_reg_file *sdr_reg_file =
23 (struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;
25 static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
26 (struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);
28 static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
29 (struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;
31 static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
32 (struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);
34 static struct socfpga_data_mgr *data_mgr =
35 (struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;
37 static struct socfpga_sdr_ctrl *sdr_ctrl =
38 (struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;
43 * In order to reduce ROM size, most of the selectable calibration steps are
44 * decided at compile time based on the user's calibration mode selection,
45 * as captured by the STATIC_CALIB_STEPS selection below.
47 * However, to support simulation-time selection of fast simulation mode, where
48 * we skip everything except the bare minimum, we need a few of the steps to
49 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
50 * check, which is based on the rtl-supplied value, or we dynamically compute
51 * the value to use based on the dynamically-chosen calibration mode
55 #define STATIC_IN_RTL_SIM 0
56 #define STATIC_SKIP_DELAY_LOOPS 0
58 #define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
59 STATIC_SKIP_DELAY_LOOPS)
61 /* calibration steps requested by the rtl */
62 uint16_t dyn_calib_steps;
65 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
66 * instead of static, we use boolean logic to select between
67 * non-skip and skip values
69 * The mask is set to include all bits when not-skipping, but is
70 * zero when skipping
73 uint16_t skip_delay_mask; /* mask off bits when skipping/not-skipping */
75 #define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
76 ((non_skip_value) & skip_delay_mask)
79 struct param_type *param;
80 uint32_t curr_shadow_reg;
82 static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
83 uint32_t write_group, uint32_t use_dm,
84 uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);
86 static void set_failing_group_stage(uint32_t group, uint32_t stage,
90 * Only set the global stage if there has not been any other
91 * failing group
93 if (gbl->error_stage == CAL_STAGE_NIL) {
94 gbl->error_substage = substage;
95 gbl->error_stage = stage;
96 gbl->error_group = group;
100 static void reg_file_set_group(u16 set_group)
102 clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
105 static void reg_file_set_stage(u8 set_stage)
107 clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
110 static void reg_file_set_sub_stage(u8 set_sub_stage)
112 set_sub_stage &= 0xff;
113 clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
117 * phy_mgr_initialize() - Initialize PHY Manager
119 * Initialize PHY Manager.
121 static void phy_mgr_initialize(void)
125 debug("%s:%d\n", __func__, __LINE__);
126 /* Calibration has control over path to memory */
128 * In Hard PHY this is a 2-bit control:
129 * 0: AFI Mux Select
130 * 1: DDIO Mux Select
132 writel(0x3, &phy_mgr_cfg->mux_sel);
134 /* USER: memory clock is not yet stable, so we begin initialization */
135 writel(0, &phy_mgr_cfg->reset_mem_stbl);
137 /* USER calibration status all set to zero */
138 writel(0, &phy_mgr_cfg->cal_status);
140 writel(0, &phy_mgr_cfg->cal_debug_info);
142 /* Init params only if we do NOT skip calibration. */
143 if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
144 return;
146 ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
147 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
148 param->read_correct_mask_vg = (1 << ratio) - 1;
149 param->write_correct_mask_vg = (1 << ratio) - 1;
150 param->read_correct_mask = (1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
151 param->write_correct_mask = (1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
152 ratio = RW_MGR_MEM_DATA_WIDTH /
153 RW_MGR_MEM_DATA_MASK_WIDTH;
154 param->dm_correct_mask = (1 << ratio) - 1;
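/*
 * Worked example with hypothetical widths (for illustration only):
 * with RW_MGR_MEM_DQ_PER_READ_DQS = 8 and one virtual group per read
 * DQS, ratio = 8 and read_correct_mask_vg = (1 << 8) - 1 = 0xff, i.e.
 * one mask bit per DQ pin that must return correct data.
 */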
158 * set_rank_and_odt_mask() - Set Rank and ODT mask
160 * @odt_mode: ODT mode, OFF or READ_WRITE
162 * Set Rank and ODT mask (On-Die Termination).
164 static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
170 if (odt_mode == RW_MGR_ODT_MODE_OFF) {
171 odt_mask_0 = 0x0;
172 odt_mask_1 = 0x0;
173 } else { /* RW_MGR_ODT_MODE_READ_WRITE */
174 switch (RW_MGR_MEM_NUMBER_OF_RANKS) {
175 case 1: /* 1 Rank */
176 /* Read: ODT = 0 ; Write: ODT = 1 */
177 odt_mask_0 = 0x0;
178 odt_mask_1 = 0x1;
179 break;
180 case 2: /* 2 Ranks */
181 if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
183 * - Dual-Slot , Single-Rank (1 CS per DIMM)
185 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
187 * Since MEM_NUMBER_OF_RANKS is 2, they
188 * are both single rank with 2 CS each
189 * (special for RDIMM).
191 * Read: Turn on ODT on the opposite rank
192 * Write: Turn on ODT on all ranks
194 odt_mask_0 = 0x3 & ~(1 << rank);
195 odt_mask_1 = 0x3;
196 } else {
198 * - Single-Slot , Dual-Rank (2 CS per DIMM)
200 * Read: Turn ODT off on all ranks
201 * Write: Turn on ODT on active rank
203 odt_mask_0 = 0x0;
204 odt_mask_1 = 0x3 & (1 << rank);
207 case 4: /* 4 Ranks */
208 /* Read:
209 * ----------+-----------------------+
210 * | ODT |
211 * Read From +-----------------------+
212 * Rank | 3 | 2 | 1 | 0 |
213 * ----------+-----+-----+-----+-----+
214 * 0 | 0 | 1 | 0 | 0 |
215 * 1 | 1 | 0 | 0 | 0 |
216 * 2 | 0 | 0 | 0 | 1 |
217 * 3 | 0 | 0 | 1 | 0 |
218 * ----------+-----+-----+-----+-----+
220 * Write:
221 * ----------+-----------------------+
222 * | ODT |
223 * Write To +-----------------------+
224 * Rank | 3 | 2 | 1 | 0 |
225 * ----------+-----+-----+-----+-----+
226 * 0 | 0 | 1 | 0 | 1 |
227 * 1 | 1 | 0 | 1 | 0 |
228 * 2 | 0 | 1 | 0 | 1 |
229 * 3 | 1 | 0 | 1 | 0 |
230 * ----------+-----+-----+-----+-----+
254 cs_and_odt_mask = (0xFF & ~(1 << rank)) |
255 ((0xFF & odt_mask_0) << 8) |
256 ((0xFF & odt_mask_1) << 16);
257 writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
258 RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
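/*
 * Worked example (a sketch, assuming the dual-rank case above): for
 * rank = 1 with odt_mask_0 = 0x0 and odt_mask_1 = 0x2, the composed
 * value is (0xFF & ~(1 << 1)) | (0x0 << 8) | (0x2 << 16) = 0x0200FD:
 * CS mask in bits [7:0], read ODT in [15:8], write ODT in [23:16].
 */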
262 * scc_mgr_set() - Set SCC Manager register
263 * @off: Base offset in SCC Manager space
264 * @grp: Read/Write group
265 * @val: Value to be set
267 * This function sets the SCC Manager (Scan Chain Control Manager) register.
269 static void scc_mgr_set(u32 off, u32 grp, u32 val)
271 writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
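/*
 * Example (illustrative): scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, 3, 5)
 * writes 5 to SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET |
 * (3 << 2); each group occupies one 32-bit word in the register bank,
 * hence the shift by 2.
 */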
275 * scc_mgr_initialize() - Initialize SCC Manager registers
277 * Initialize SCC Manager registers.
279 static void scc_mgr_initialize(void)
282 * Clear register file for HPS. 16 (2^4) is the size of the
283 * full register file in the scc mgr:
284 * RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
285 * MEM_IF_READ_DQS_WIDTH - 1);
289 for (i = 0; i < 16; i++) {
290 debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
291 __func__, __LINE__, i);
292 scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i);
296 static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
298 scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
301 static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
303 scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
306 static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
308 scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
311 static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
313 scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
316 static void scc_mgr_set_dqs_io_in_delay(uint32_t delay)
318 scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
319 delay);
322 static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
324 scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
327 static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
329 scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
332 static void scc_mgr_set_dqs_out1_delay(uint32_t delay)
334 scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
335 delay);
338 static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
340 scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
341 RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
342 delay);
345 /* load up dqs config settings */
346 static void scc_mgr_load_dqs(uint32_t dqs)
348 writel(dqs, &sdr_scc_mgr->dqs_ena);
351 /* load up dqs io config settings */
352 static void scc_mgr_load_dqs_io(void)
354 writel(0, &sdr_scc_mgr->dqs_io_ena);
357 /* load up dq config settings */
358 static void scc_mgr_load_dq(uint32_t dq_in_group)
360 writel(dq_in_group, &sdr_scc_mgr->dq_ena);
363 /* load up dm config settings */
364 static void scc_mgr_load_dm(uint32_t dm)
366 writel(dm, &sdr_scc_mgr->dm_ena);
370 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
371 * @off: Base offset in SCC Manager space
372 * @grp: Read/Write group
373 * @val: Value to be set
374 * @update: If non-zero, trigger SCC Manager update for all ranks
376 * This function sets the SCC Manager (Scan Chain Control Manager) register
377 * and optionally triggers the SCC update for all ranks.
379 static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
384 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
385 r += NUM_RANKS_PER_SHADOW_REG) {
386 scc_mgr_set(off, grp, val);
388 if (update || (r == 0)) {
389 writel(grp, &sdr_scc_mgr->dqs_ena);
390 writel(0, &sdr_scc_mgr->update);
395 static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
398 * USER although the h/w doesn't support different phases per
399 * shadow register, for simplicity our scc manager modeling
400 * keeps different phase settings per shadow reg, and it's
401 * important for us to keep them in sync to match h/w.
402 * for efficiency, the scan chain update should occur only
403 * once to sr0
405 scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
406 read_group, phase, 0);
409 static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
413 * USER although the h/w doesn't support different phases per
414 * shadow register, for simplicity our scc manager modeling
415 * keeps different phase settings per shadow reg, and it's
416 * important for us to keep them in sync to match h/w.
417 * for efficiency, the scan chain update should occur only
418 * once to sr0
420 scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
421 write_group, phase, 0);
424 static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
428 * In shadow register mode, the T11 settings are stored in
429 * registers in the core, which are updated by the DQS_ENA
430 * signals. Not issuing the SCC_MGR_UPD command allows us to
431 * save lots of rank switching overhead, by calling
432 * select_shadow_regs_for_update with update_scan_chains
433 * set to 0
435 scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
436 read_group, delay, 1);
437 writel(0, &sdr_scc_mgr->update);
441 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
442 * @write_group: Write group
443 * @delay: Delay value
445 * This function sets the OCT output delay in SCC manager.
447 static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
449 const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
450 RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
451 const int base = write_group * ratio;
454 * Load the setting in the SCC manager
455 * Although OCT affects only write data, the OCT delay is controlled
456 * by the DQS logic block which is instantiated once per read group.
457 * For protocols where a write group consists of multiple read groups,
458 * the setting must be set multiple times.
460 for (i = 0; i < ratio; i++)
461 scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
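/*
 * Worked example with hypothetical widths: with 8 read groups and 4
 * write groups, ratio = 2, so write_group 3 maps to read groups 6 and 7
 * and the delay is written to both of their DQS logic blocks.
 */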
465 * scc_mgr_set_hhp_extras() - Set HHP extras.
467 * Load the fixed setting in the SCC manager HHP extras.
469 static void scc_mgr_set_hhp_extras(void)
472 * Load the fixed setting in the SCC manager
473 * bits: 0:0 = 1'b1 - DQS bypass
474 * bits: 1:1 = 1'b1 - DQ bypass
475 * bits: 4:2 = 3'b001 - rfifo_mode
476 * bits: 6:5 = 2'b01 - rfifo clock_select
477 * bits: 7:7 = 1'b0 - separate gating from ungating setting
478 * bits: 8:8 = 1'b0 - separate OE from Output delay setting
480 const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
481 (1 << 2) | (1 << 1) | (1 << 0);
482 const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
483 SCC_MGR_HHP_GLOBALS_OFFSET |
484 SCC_MGR_HHP_EXTRAS_OFFSET;
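/*
 * Sanity check of the constant above: (1 << 5) | (1 << 2) | (1 << 1) |
 * (1 << 0) = 0x20 | 0x04 | 0x02 | 0x01 = 0x27, matching the bit map in
 * the comment.
 */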
486 debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
487 __func__, __LINE__);
488 writel(value, addr);
489 debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
490 __func__, __LINE__);
494 * scc_mgr_zero_all() - Zero all DQS config
496 * Zero all DQS config.
498 static void scc_mgr_zero_all(void)
503 * USER Zero all DQS config settings, across all groups and all
506 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
507 r += NUM_RANKS_PER_SHADOW_REG) {
508 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
510 * The phases actually don't exist on a per-rank basis,
511 * but there's no harm updating them several times, so
512 * let's keep the code simple.
514 scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
515 scc_mgr_set_dqs_en_phase(i, 0);
516 scc_mgr_set_dqs_en_delay(i, 0);
519 for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
520 scc_mgr_set_dqdqs_output_phase(i, 0);
521 /* Arria V/Cyclone V don't have out2. */
522 scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
526 /* Multicast to all DQS group enables. */
527 writel(0xff, &sdr_scc_mgr->dqs_ena);
528 writel(0, &sdr_scc_mgr->update);
532 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
533 * @write_group: Write group
535 * Set bypass mode and trigger SCC update.
537 static void scc_set_bypass_mode(const u32 write_group)
539 /* Multicast to all DQ enables. */
540 writel(0xff, &sdr_scc_mgr->dq_ena);
541 writel(0xff, &sdr_scc_mgr->dm_ena);
543 /* Update current DQS IO enable. */
544 writel(0, &sdr_scc_mgr->dqs_io_ena);
546 /* Update the DQS logic. */
547 writel(write_group, &sdr_scc_mgr->dqs_ena);
550 writel(0, &sdr_scc_mgr->update);
554 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
555 * @write_group: Write group
557 * Load DQS settings for Write Group, do not trigger SCC update.
559 static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
561 const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
562 RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
563 const int base = write_group * ratio;
566 * Load the setting in the SCC manager
567 * Although OCT affects only write data, the OCT delay is controlled
568 * by the DQS logic block which is instantiated once per read group.
569 * For protocols where a write group consists of multiple read groups,
570 * the setting must be set multiple times.
572 for (i = 0; i < ratio; i++)
573 writel(base + i, &sdr_scc_mgr->dqs_ena);
577 * scc_mgr_zero_group() - Zero all configs for a group
579 * Zero DQ, DM, DQS and OCT configs for a group.
581 static void scc_mgr_zero_group(const u32 write_group, const int out_only)
585 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
586 r += NUM_RANKS_PER_SHADOW_REG) {
587 /* Zero all DQ config settings. */
588 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
589 scc_mgr_set_dq_out1_delay(i, 0);
591 scc_mgr_set_dq_in_delay(i, 0);
594 /* Multicast to all DQ enables. */
595 writel(0xff, &sdr_scc_mgr->dq_ena);
597 /* Zero all DM config settings. */
598 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
599 scc_mgr_set_dm_out1_delay(i, 0);
601 /* Multicast to all DM enables. */
602 writel(0xff, &sdr_scc_mgr->dm_ena);
604 /* Zero all DQS IO settings. */
606 scc_mgr_set_dqs_io_in_delay(0);
608 /* Arria V/Cyclone V don't have out2. */
609 scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
610 scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
611 scc_mgr_load_dqs_for_write_group(write_group);
613 /* Multicast to all DQS IO enables (only 1 in total). */
614 writel(0, &sdr_scc_mgr->dqs_io_ena);
616 /* Hit update to zero everything. */
617 writel(0, &sdr_scc_mgr->update);
622 * apply and load a particular input delay for the DQ pins in a group
623 * group_bgn is the index of the first dq pin (in the write group)
625 static void scc_mgr_apply_group_dq_in_delay(uint32_t group_bgn, uint32_t delay)
629 for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
630 scc_mgr_set_dq_in_delay(p, delay);
631 scc_mgr_load_dq(p);
636 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
637 * @delay: Delay value
639 * Apply and load a particular output delay for the DQ pins in a group.
641 static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
645 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
646 scc_mgr_set_dq_out1_delay(i, delay);
647 scc_mgr_load_dq(i);
651 /* apply and load a particular output delay for the DM pins in a group */
652 static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
656 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
657 scc_mgr_set_dm_out1_delay(i, delay1);
658 scc_mgr_load_dm(i);
663 /* apply and load delay on both DQS and OCT out1 */
664 static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
667 scc_mgr_set_dqs_out1_delay(delay);
668 scc_mgr_load_dqs_io();
670 scc_mgr_set_oct_out1_delay(write_group, delay);
671 scc_mgr_load_dqs_for_write_group(write_group);
675 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
676 * @write_group: Write group
677 * @delay: Delay value
679 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
681 static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
687 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
691 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
695 new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
696 if (new_delay > IO_IO_OUT2_DELAY_MAX) {
697 debug_cond(DLEVEL == 1,
698 "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
699 __func__, __LINE__, write_group, delay, new_delay,
700 IO_IO_OUT2_DELAY_MAX,
701 new_delay - IO_IO_OUT2_DELAY_MAX);
702 new_delay -= IO_IO_OUT2_DELAY_MAX;
703 scc_mgr_set_dqs_out1_delay(new_delay);
706 scc_mgr_load_dqs_io();
709 new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
710 if (new_delay > IO_IO_OUT2_DELAY_MAX) {
711 debug_cond(DLEVEL == 1,
712 "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
713 __func__, __LINE__, write_group, delay,
714 new_delay, IO_IO_OUT2_DELAY_MAX,
715 new_delay - IO_IO_OUT2_DELAY_MAX);
716 new_delay -= IO_IO_OUT2_DELAY_MAX;
717 scc_mgr_set_oct_out1_delay(write_group, new_delay);
720 scc_mgr_load_dqs_for_write_group(write_group);
724 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side to all ranks
725 * @write_group: Write group
726 * @delay: Delay value
728 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
731 scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
736 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
737 r += NUM_RANKS_PER_SHADOW_REG) {
738 scc_mgr_apply_group_all_out_delay_add(write_group, delay);
739 writel(0, &sdr_scc_mgr->update);
744 * set_jump_as_return() - Return instruction optimization
746 * Optimization used to recover some slots in ddr3 inst_rom; it could
747 * be applied to other protocols if we wanted to.
749 static void set_jump_as_return(void)
752 * To save space, we replace return with a jump to a special shared
753 * RETURN instruction, and we set the counter to a large value so that
754 * we always jump.
756 writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
757 writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
761 * should always use constants as argument to ensure all computations are
762 * performed at compile time
764 static void delay_for_n_mem_clocks(const uint32_t clocks)
771 debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);
774 afi_clocks = (clocks + AFI_RATE_RATIO-1) / AFI_RATE_RATIO;
775 /* scale (rounding up) to get afi clocks */
778 * Note, we don't bother accounting for being off a little bit
779 * because of a few extra instructions in outer loops
780 * Note, the loops have a test at the end, and do the test before
781 * the decrement, and so always perform the loop
782 * 1 time more than the counter value
784 if (afi_clocks == 0) {
786 } else if (afi_clocks <= 0x100) {
787 inner = afi_clocks-1;
788 outer = 0;
789 c_loop = 0;
790 } else if (afi_clocks <= 0x10000) {
791 inner = 0xff;
792 outer = (afi_clocks-1) >> 8;
793 c_loop = 0;
794 } else {
795 inner = 0xff;
796 outer = 0xff;
797 c_loop = (afi_clocks-1) >> 16;
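/*
 * Worked example (a sketch): for afi_clocks = 0x300 the middle branch
 * applies, giving outer = (0x300 - 1) >> 8 = 2 with the inner counter
 * saturated at 0xff; since each loop runs counter + 1 times, this gives
 * (0xff + 1) * (2 + 1) = 0x300 AFI clocks, exactly as requested.
 */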
801 * rom instructions are structured as follows:
803 * IDLE_LOOP2: jnz cntr0, TARGET_A
804 * IDLE_LOOP1: jnz cntr1, TARGET_B
807 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
808 * TARGET_B is set to IDLE_LOOP2 as well
810 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
811 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
813 * a little confusing, but it helps save precious space in the inst_rom
814 * and sequencer rom and keeps the delays more accurate and reduces
817 if (afi_clocks <= 0x100) {
818 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
819 &sdr_rw_load_mgr_regs->load_cntr1);
821 writel(RW_MGR_IDLE_LOOP1,
822 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
824 writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
825 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
827 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
828 &sdr_rw_load_mgr_regs->load_cntr0);
830 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
831 &sdr_rw_load_mgr_regs->load_cntr1);
833 writel(RW_MGR_IDLE_LOOP2,
834 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
836 writel(RW_MGR_IDLE_LOOP2,
837 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
839 /* hack to get around compiler not being smart enough */
840 if (afi_clocks <= 0x10000) {
841 /* only need to run once */
842 writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
843 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
846 writel(RW_MGR_IDLE_LOOP2,
847 SDR_PHYGRP_RWMGRGRP_ADDRESS |
848 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
849 } while (c_loop-- != 0);
852 debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
856 * rw_mgr_mem_init_load_regs() - Load instruction registers
857 * @cntr0: Counter 0 value
858 * @cntr1: Counter 1 value
859 * @cntr2: Counter 2 value
860 * @jump: Jump instruction value
862 * Load instruction registers.
864 static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
866 uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
867 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
870 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
871 &sdr_rw_load_mgr_regs->load_cntr0);
872 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
873 &sdr_rw_load_mgr_regs->load_cntr1);
874 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
875 &sdr_rw_load_mgr_regs->load_cntr2);
877 /* Load jump address */
878 writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
879 writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
880 writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
882 /* Execute count instruction */
883 writel(jump, grpaddr);
887 * rw_mgr_mem_load_user() - Load user calibration values
888 * @fin1: Final instruction 1
889 * @fin2: Final instruction 2
890 * @precharge: If 1, precharge the banks at the end
892 * Load user calibration values and optionally precharge the banks.
894 static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
897 u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
898 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
901 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
902 if (param->skip_ranks[r]) {
903 /* request to skip the rank */
904 continue;
908 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
910 /* precharge all banks ... */
912 writel(RW_MGR_PRECHARGE_ALL, grpaddr);
915 * USER Use mirrored commands for odd ranks if address
916 * mirroring is on
918 if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
919 set_jump_as_return();
920 writel(RW_MGR_MRS2_MIRR, grpaddr);
921 delay_for_n_mem_clocks(4);
922 set_jump_as_return();
923 writel(RW_MGR_MRS3_MIRR, grpaddr);
924 delay_for_n_mem_clocks(4);
925 set_jump_as_return();
926 writel(RW_MGR_MRS1_MIRR, grpaddr);
927 delay_for_n_mem_clocks(4);
928 set_jump_as_return();
929 writel(fin1, grpaddr);
931 set_jump_as_return();
932 writel(RW_MGR_MRS2, grpaddr);
933 delay_for_n_mem_clocks(4);
934 set_jump_as_return();
935 writel(RW_MGR_MRS3, grpaddr);
936 delay_for_n_mem_clocks(4);
937 set_jump_as_return();
938 writel(RW_MGR_MRS1, grpaddr);
939 set_jump_as_return();
940 writel(fin2, grpaddr);
946 set_jump_as_return();
947 writel(RW_MGR_ZQCL, grpaddr);
949 /* tZQinit = tDLLK = 512 ck cycles */
950 delay_for_n_mem_clocks(512);
955 * rw_mgr_mem_initialize() - Initialize RW Manager
957 * Initialize RW Manager.
959 static void rw_mgr_mem_initialize(void)
961 debug("%s:%d\n", __func__, __LINE__);
963 /* The reset / cke part of initialization is broadcasted to all ranks */
964 writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
965 RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
968 * Here's how you load registers for a loop:
969 * Counters are located @ 0x800
970 * Jump addresses are located @ 0xC00
971 * For both, registers 0 to 3 are selected using bits 3 and 2, like
972 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
973 * I know this ain't pretty, but the Avalon bus throws away the 2 least
974 * significant bits.
977 /* Start with memory RESET activated */
982 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
983 * If a and b are the number of iteration in 2 nested loops
984 * it takes the following number of cycles to complete the operation:
985 * number_of_cycles = ((2 + n) * a + 2) * b
986 * where n is the number of instruction in the inner loop
987 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
988 * b = 6A
990 rw_mgr_mem_init_load_regs(SEQ_TINIT_CNTR0_VAL, SEQ_TINIT_CNTR1_VAL,
991 SEQ_TINIT_CNTR2_VAL,
992 RW_MGR_INIT_RESET_0_CKE_0);
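/*
 * Quick check of the formula above with the suggested values: with
 * n = 0, a = 256 and b = 106, ((2 + 0) * 256 + 2) * 106 = 54484 cycles,
 * comfortably above the ~54000 cycles needed for 200us at 266MHz.
 */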
994 /* Indicate that memory is stable. */
995 writel(1, &phy_mgr_cfg->reset_mem_stbl);
998 * transition the RESET to high
1003 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
1004 * If a and b are the number of iteration in 2 nested loops
1005 * it takes the following number of cycles to complete the operation
1006 * number_of_cycles = ((2 + n) * a + 2) * b
1007 * where n is the number of instruction in the inner loop
1008 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
1009 * b = FF
1011 rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
1012 SEQ_TRESET_CNTR2_VAL,
1013 RW_MGR_INIT_RESET_1_CKE_0);
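/*
 * Same check for the 500us delay: with n = 2, a = 131 and b = 256,
 * ((2 + 2) * 131 + 2) * 256 = 526 * 256 = 134656 cycles, just over the
 * ~134000 cycles required.
 */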
1015 /* Bring up clock enable. */
1017 /* tXRP < 250 ck cycles */
1018 delay_for_n_mem_clocks(250);
1020 rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
1025 * At the end of calibration we have to program the user settings in, and
1026 * USER hand off the memory to the user.
1028 static void rw_mgr_mem_handoff(void)
1030 rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
1032 * USER need to wait tMOD (12CK or 15ns) time before issuing
1033 * other commands, but we will have plenty of NIOS cycles before
1034 * actual handoff so it's okay.
1039 * performs a guaranteed read on the patterns we are going to use during a
1040 * read test to ensure memory works
1042 static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
1043 uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
1047 uint32_t correct_mask_vg;
1048 uint32_t tmp_bit_chk;
1049 uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
1050 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1052 uint32_t base_rw_mgr;
1054 *bit_chk = param->read_correct_mask;
1055 correct_mask_vg = param->read_correct_mask_vg;
1057 for (r = rank_bgn; r < rank_end; r++) {
1058 if (param->skip_ranks[r])
1059 /* request to skip the rank */
1060 continue;
1063 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1065 /* Load up a constant burst of read commands */
1066 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
1067 writel(RW_MGR_GUARANTEED_READ,
1068 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
1070 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
1071 writel(RW_MGR_GUARANTEED_READ_CONT,
1072 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
1073 tmp_bit_chk = 0;
1075 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) {
1076 /* reset the fifos to get pointers to known state */
1078 writel(0, &phy_mgr_cmd->fifo_reset);
1079 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1080 RW_MGR_RESET_READ_DATAPATH_OFFSET);
1082 tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
1083 / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);
1085 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1086 writel(RW_MGR_GUARANTEED_READ, addr +
1087 ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
1090 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
1091 tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & (~base_rw_mgr));
1096 *bit_chk &= tmp_bit_chk;
1099 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1100 writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
1102 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1103 debug_cond(DLEVEL == 1, "%s:%d test_load_patterns(%u,ALL) => (%u == %u) =>\
1104 %lu\n", __func__, __LINE__, group, *bit_chk, param->read_correct_mask,
1105 (long unsigned int)(*bit_chk == param->read_correct_mask));
1106 return *bit_chk == param->read_correct_mask;
1109 static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks
1110 (uint32_t group, uint32_t num_tries, uint32_t *bit_chk)
1112 return rw_mgr_mem_calibrate_read_test_patterns(0, group,
1113 num_tries, bit_chk, 1);
1116 /* load up the patterns we are going to use during a read test */
1117 static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn,
1121 uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
1122 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1124 debug("%s:%d\n", __func__, __LINE__);
1125 for (r = rank_bgn; r < rank_end; r++) {
1126 if (param->skip_ranks[r])
1127 /* request to skip the rank */
1128 continue;
1131 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1133 /* Load up a constant burst */
1134 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
1136 writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
1137 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
1139 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
1141 writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
1142 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
1144 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
1146 writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
1147 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1149 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);
1151 writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
1152 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
1154 writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1155 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
1158 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1162 * try a read and see if it returns correct data back. has dummy reads
1163 * inserted into the mix used to align dqs enable. has more thorough checks
1164 * than the regular read test.
1166 static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
1167 uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
1168 uint32_t all_groups, uint32_t all_ranks)
1171 uint32_t correct_mask_vg;
1172 uint32_t tmp_bit_chk;
1173 uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
1174 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1176 uint32_t base_rw_mgr;
1178 *bit_chk = param->read_correct_mask;
1179 correct_mask_vg = param->read_correct_mask_vg;
1181 uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) &
1182 CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);
1184 for (r = rank_bgn; r < rank_end; r++) {
1185 if (param->skip_ranks[r])
1186 /* request to skip the rank */
1187 continue;
1190 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1192 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);
1194 writel(RW_MGR_READ_B2B_WAIT1,
1195 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
1197 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
1198 writel(RW_MGR_READ_B2B_WAIT2,
1199 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1201 if (quick_read_mode)
1202 writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
1203 /* need at least two (1+1) reads to capture failures */
1204 else if (all_groups)
1205 writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
1207 writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);
1209 writel(RW_MGR_READ_B2B,
1210 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
1212 writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
1213 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
1214 &sdr_rw_load_mgr_regs->load_cntr3);
1216 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);
1218 writel(RW_MGR_READ_B2B,
1219 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
1220 tmp_bit_chk = 0;
1222 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) {
1223 /* reset the fifos to get pointers to known state */
1224 writel(0, &phy_mgr_cmd->fifo_reset);
1225 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1226 RW_MGR_RESET_READ_DATAPATH_OFFSET);
1228 tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
1229 / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);
1232 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_ALL_GROUPS_OFFSET;
1234 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1236 writel(RW_MGR_READ_B2B, addr +
1237 ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
1240 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
1241 tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
1246 *bit_chk &= tmp_bit_chk;
1249 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1250 writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
1253 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1254 debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ALL,%u) =>\
1255 (%u == %u) => %lu", __func__, __LINE__, group,
1256 all_groups, *bit_chk, param->read_correct_mask,
1257 (long unsigned int)(*bit_chk ==
1258 param->read_correct_mask));
1259 return *bit_chk == param->read_correct_mask;
1261 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1262 debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ONE,%u) =>\
1263 (%u != %lu) => %lu\n", __func__, __LINE__,
1264 group, all_groups, *bit_chk, (long unsigned int)0,
1265 (long unsigned int)(*bit_chk != 0x00));
1266 return *bit_chk != 0x00;
1270 static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
1271 uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
1272 uint32_t all_groups)
1274 return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
1275 bit_chk, all_groups, 1);
1278 static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v)
1280 writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
1284 static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v)
1288 for (i = 0; i < VFIFO_SIZE-1; i++)
1289 rw_mgr_incr_vfifo(grp, v);
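/*
 * Note on the loop above: the hard PHY VFIFO pointer can only be
 * advanced, so a decrement is emulated by wrapping all the way around;
 * e.g. with VFIFO_SIZE = 8, stepping forward 7 times is equivalent to
 * stepping back once.
 */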
1292 static int find_vfifo_read(uint32_t grp, uint32_t *bit_chk)
1295 uint32_t fail_cnt = 0;
1296 uint32_t test_status;
1298 for (v = 0; v < VFIFO_SIZE; ) {
1299 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %u\n",
1300 __func__, __LINE__, v);
1301 test_status = rw_mgr_mem_calibrate_read_test_all_ranks
1302 (grp, 1, PASS_ONE_BIT, bit_chk, 0);
1310 /* fiddle with FIFO */
1311 rw_mgr_incr_vfifo(grp, &v);
1314 if (v >= VFIFO_SIZE) {
1315 /* no failing read found!! Something must have gone wrong */
1316 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n",
1317 __func__, __LINE__);
1324 static int find_working_phase(uint32_t *grp, uint32_t *bit_chk,
1325 uint32_t dtaps_per_ptap, uint32_t *work_bgn,
1326 uint32_t *v, uint32_t *d, uint32_t *p,
1327 uint32_t *i, uint32_t *max_working_cnt)
1329 uint32_t found_begin = 0;
1330 uint32_t tmp_delay = 0;
1331 uint32_t test_status;
1333 for (*d = 0; *d <= dtaps_per_ptap; (*d)++, tmp_delay +=
1334 IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
1335 *work_bgn = tmp_delay;
1336 scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);
1338 for (*i = 0; *i < VFIFO_SIZE; (*i)++) {
1339 for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_bgn +=
1340 IO_DELAY_PER_OPA_TAP) {
1341 scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);
1344 rw_mgr_mem_calibrate_read_test_all_ranks
1345 (*grp, 1, PASS_ONE_BIT, bit_chk, 0);
1348 *max_working_cnt = 1;
1357 if (*p > IO_DQS_EN_PHASE_MAX)
1358 /* fiddle with FIFO */
1359 rw_mgr_incr_vfifo(*grp, v);
1366 if (*i >= VFIFO_SIZE) {
1367 /* cannot find working solution */
1368 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/\
1369 ptap/dtap\n", __func__, __LINE__);
1376 static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk,
1377 uint32_t *work_bgn, uint32_t *v, uint32_t *d,
1378 uint32_t *p, uint32_t *max_working_cnt)
1380 uint32_t found_begin = 0;
1383 /* Special case code for backing up a phase */
1385 *p = IO_DQS_EN_PHASE_MAX;
1386 rw_mgr_decr_vfifo(*grp, v);
1390 tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
1391 scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);
1393 for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn;
1394 (*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
1395 scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);
1397 if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
1398 PASS_ONE_BIT, bit_chk, 0)) {
1399 found_begin = 1;
1401 *work_bgn = tmp_delay;
1406 /* We have found a working dtap before the ptap found above */
1407 if (found_begin == 1)
1408 (*max_working_cnt)++;
1411 * Restore VFIFO to old state before we decremented it
1415 if (*p > IO_DQS_EN_PHASE_MAX) {
1417 rw_mgr_incr_vfifo(*grp, v);
1420 scc_mgr_set_dqs_en_delay_all_ranks(*grp, 0);
1423 static int sdr_nonworking_phase(uint32_t *grp, uint32_t *bit_chk,
1424 uint32_t *work_bgn, uint32_t *v, uint32_t *d,
1425 uint32_t *p, uint32_t *i, uint32_t *max_working_cnt,
1428 uint32_t found_end = 0;
1431 *work_end += IO_DELAY_PER_OPA_TAP;
1432 if (*p > IO_DQS_EN_PHASE_MAX) {
1433 /* fiddle with FIFO */
1435 rw_mgr_incr_vfifo(*grp, v);
1438 for (; *i < VFIFO_SIZE + 1; (*i)++) {
1439 for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_end
1440 += IO_DELAY_PER_OPA_TAP) {
1441 scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);
1443 if (!rw_mgr_mem_calibrate_read_test_all_ranks
1444 (*grp, 1, PASS_ONE_BIT, bit_chk, 0)) {
1448 (*max_working_cnt)++;
1455 if (*p > IO_DQS_EN_PHASE_MAX) {
1456 /* fiddle with FIFO */
1457 rw_mgr_incr_vfifo(*grp, v);
1462 if (*i >= VFIFO_SIZE + 1) {
1463 /* cannot see edge of failing read */
1464 debug_cond(DLEVEL == 2, "%s:%d sdr_nonworking_phase: end:\
1465 failed\n", __func__, __LINE__);
1472 static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk,
1473 uint32_t *work_bgn, uint32_t *v, uint32_t *d,
1474 uint32_t *p, uint32_t *work_mid,
1480 *work_mid = (*work_bgn + *work_end) / 2;
1482 debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
1483 *work_bgn, *work_end, *work_mid);
1484 /* Get the middle delay to be less than a VFIFO delay */
1485 for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX;
1486 (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
1488 debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
1489 while (*work_mid > tmp_delay)
1490 *work_mid -= tmp_delay;
1491 debug_cond(DLEVEL == 2, "new work_mid %d\n", *work_mid);
1494 for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX && tmp_delay < *work_mid;
1495 (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
1497 tmp_delay -= IO_DELAY_PER_OPA_TAP;
1498 debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", (*p) - 1, tmp_delay);
1499 for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_mid; (*d)++,
1500 tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP)
1502 debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", *d, tmp_delay);
1504 scc_mgr_set_dqs_en_phase_all_ranks(*grp, (*p) - 1);
1505 scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);
1508 * push vfifo until we can successfully calibrate. We can do this
1509 * because the largest possible margin is 1 VFIFO cycle.
1511 for (i = 0; i < VFIFO_SIZE; i++) {
1512 debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%u\n",
1514 if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
1520 /* fiddle with FIFO */
1521 rw_mgr_incr_vfifo(*grp, v);
1524 if (i >= VFIFO_SIZE) {
1525 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: \
1526 failed\n", __func__, __LINE__);
1533 /* find a good dqs enable to use */
1534 static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
1536 uint32_t v, d, p, i;
1537 uint32_t max_working_cnt;
1539 uint32_t dtaps_per_ptap;
1540 uint32_t work_bgn, work_mid, work_end;
1541 uint32_t found_passing_read, found_failing_read, initial_failing_dtap;
1543 debug("%s:%d %u\n", __func__, __LINE__, grp);
1545 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
1547 scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
1548 scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
1550 /* ************************************************************** */
1551 /* * Step 0 : Determine number of delay taps for each phase tap * */
1552 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP/IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
1554 /* ********************************************************* */
1555 /* * Step 1 : First push vfifo until we get a failing read * */
1556 v = find_vfifo_read(grp, &bit_chk);
1558 max_working_cnt = 0;
1560 /* ******************************************************** */
1561 /* * step 2: find first working phase, increment in ptaps * */
1563 if (find_working_phase(&grp, &bit_chk, dtaps_per_ptap, &work_bgn, &v, &d,
1564 &p, &i, &max_working_cnt) == 0)
1567 work_end = work_bgn;
1570 * If d is 0 then the working window covers a phase tap and
1571 * we can follow the old procedure; otherwise, we've found the
1572 * beginning, and we need to increment the dtaps until we find the end.
1575 /* ********************************************************* */
1576 /* * step 3a: if we have room, back off by one and
1577 increment in dtaps * */
1579 sdr_backup_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
1582 /* ********************************************************* */
1583 /* * step 4a: go forward from working phase to non working
1584 phase, increment in ptaps * */
1585 if (sdr_nonworking_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
1586 &i, &max_working_cnt, &work_end) == 0)
1589 /* ********************************************************* */
1590 /* * step 5a: back off one from last, increment in dtaps * */
1592 /* Special case code for backing up a phase */
1594 p = IO_DQS_EN_PHASE_MAX;
1595 rw_mgr_decr_vfifo(grp, &v);
1600 work_end -= IO_DELAY_PER_OPA_TAP;
1601 scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1603 /* * The actual increment of dtaps is done outside of
1604 the if/else loop to share code */
1607 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: \
1608 vfifo=%u ptap=%u\n", __func__, __LINE__,
1611 /* ******************************************************* */
1612 /* * step 3-5b: Find the right edge of the window using
1614 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase:vfifo=%u \
1615 ptap=%u dtap=%u bgn=%u\n", __func__, __LINE__,
1618 work_end = work_bgn;
1620 /* * The actual increment of dtaps is done outside of the
1621 if/else loop to share code */
1623 /* Only here to counterbalance a subtract later on which is
1624 not needed if this branch of the algorithm is taken */
1628 /* The dtap increment to find the failing edge is done here */
1629 for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end +=
1630 IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
1631 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
1632 end-2: dtap=%u\n", __func__, __LINE__, d);
1633 scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1635 if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1642 /* Go back to working dtap */
1644 work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
1646 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%u \
1647 ptap=%u dtap=%u end=%u\n", __func__, __LINE__,
1648 v, p, d-1, work_end);
1650 if (work_end < work_bgn) {
1652 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: \
1653 failed\n", __func__, __LINE__);
1657 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n",
1658 __func__, __LINE__, work_bgn, work_end);
1660 /* *************************************************************** */
1662 * * We need to calculate the number of dtaps that equal a ptap
1663 * * To do that we'll back up a ptap and re-find the edge of the
1664 * * window using dtaps
1667 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap \
1668 for tracking\n", __func__, __LINE__);
1670 /* Special case code for backing up a phase */
1672 p = IO_DQS_EN_PHASE_MAX;
1673 rw_mgr_decr_vfifo(grp, &v);
1674 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
1675 cycle/phase: v=%u p=%u\n", __func__, __LINE__,
1679 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
1680 phase only: v=%u p=%u", __func__, __LINE__,
1684 scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1687 * Increase dtap until we first see a passing read (in case the
1688 * window is smaller than a ptap),
1689 * and then a failing read to mark the edge of the window again
1692 /* Find a passing read */
1693 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n",
1694 __func__, __LINE__);
1695 found_passing_read = 0;
1696 found_failing_read = 0;
1697 initial_failing_dtap = d;
1698 for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
1699 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing \
1700 read d=%u\n", __func__, __LINE__, d);
1701 scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1703 if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1706 found_passing_read = 1;
1711 if (found_passing_read) {
1712 /* Find a failing read */
1713 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing \
1714 read\n", __func__, __LINE__);
1715 for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
1716 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
1717 testing read d=%u\n", __func__, __LINE__, d);
1718 scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1720 if (!rw_mgr_mem_calibrate_read_test_all_ranks
1721 (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
1722 found_failing_read = 1;
1727 debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to \
1728 calculate dtaps", __func__, __LINE__);
1729 debug_cond(DLEVEL == 1, "per ptap. Fall back on static value\n");
1733 * The dynamically calculated dtaps_per_ptap is only valid if we
1734 * found a passing/failing read. If we didn't, it means d hit the max
1735 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
1736 * statically calculated value.
1738 if (found_passing_read && found_failing_read)
1739 dtaps_per_ptap = d - initial_failing_dtap;
1741 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
1742 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u \
1743 - %u = %u", __func__, __LINE__, d,
1744 initial_failing_dtap, dtaps_per_ptap);
1746 /* ******************************************** */
1747 /* * step 6: Find the centre of the window * */
1748 if (sdr_find_window_centre(&grp, &bit_chk, &work_bgn, &v, &d, &p,
1749 &work_mid, &work_end) == 0)
1752 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: \
1753 vfifo=%u ptap=%u dtap=%u\n", __func__, __LINE__,
1759 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
1760 * dq_in_delay values
1763 rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
1764 (uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
1772 const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
1773 (RW_MGR_MEM_DQ_PER_READ_DQS-1);
1774 /* we start at zero, so have one less dq to divide among */
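/*
 * Worked example with hypothetical parameters: with IO_IO_IN_DELAY_MAX
 * = 31 and 8 DQ per read DQS, delay_step = 31 / 7 = 4, so the sweep
 * below places the 8 DQ inputs at delays 0, 4, 8, ..., 28.
 */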
1776 debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group,
1779 /* try different dq_in_delays since the dq path is shorter than dqs */
1781 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
1782 r += NUM_RANKS_PER_SHADOW_REG) {
1783 for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++, d += delay_step) {
1784 debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_\
1785 vfifo_find_dqs_", __func__, __LINE__);
1786 debug_cond(DLEVEL == 1, "en_phase_sweep_dq_in_delay: g=%u/%u ",
1787 write_group, read_group);
1788 debug_cond(DLEVEL == 1, "r=%u, i=%u p=%u d=%u\n", r, i , p, d);
1789 scc_mgr_set_dq_in_delay(p, d);
1792 writel(0, &sdr_scc_mgr->update);
1795 found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);
1797 debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_\
1798 en_phase_sweep_dq", __func__, __LINE__);
1799 debug_cond(DLEVEL == 1, "_in_delay: g=%u/%u found=%u; Resetting delay \
1800 chain to zero\n", write_group, read_group, found);
1802 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
1803 r += NUM_RANKS_PER_SHADOW_REG) {
1804 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
1806 scc_mgr_set_dq_in_delay(p, 0);
1809 writel(0, &sdr_scc_mgr->update);
1815 /* per-bit deskew DQ and center */
1816 static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
1817 uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
1818 uint32_t use_read_test, uint32_t update_fom)
1820 uint32_t i, p, d, min_index;
1822 * Store these as signed since there are comparisons with
1823 * signed numbers.
1825 uint32_t bit_chk;
1826 uint32_t sticky_bit_chk;
1827 int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
1828 int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
1829 int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
1831 int32_t orig_mid_min, mid_min;
1832 int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
1834 int32_t dq_margin, dqs_margin;
1836 uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
1839 debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);
1841 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;
1842 start_dqs = readl(addr + (read_group << 2));
1843 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
1844 start_dqs_en = readl(addr + ((read_group << 2)
1845 - IO_DQS_EN_DELAY_OFFSET));
1847 /* set the left and right edge of each bit to an illegal value */
1848 /* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
1850 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
1851 left_edge[i] = IO_IO_IN_DELAY_MAX + 1;
1852 right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
1855 /* Search for the left edge of the window for each bit */
1856 for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
1857 scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d);
1859 writel(0, &sdr_scc_mgr->update);
1862 * Stop searching when the read test doesn't pass AND when
1863 * we've seen a passing read on every bit.
1865 if (use_read_test) {
1866 stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
1867 read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
1870 rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
1873 bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
1874 (read_group - (write_group *
1875 RW_MGR_MEM_IF_READ_DQS_WIDTH /
1876 RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
1877 stop = (bit_chk == 0);
1879 sticky_bit_chk = sticky_bit_chk | bit_chk;
1880 stop = stop && (sticky_bit_chk == param->read_correct_mask);
1881 debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => %u == %u \
1882 && %u", __func__, __LINE__, d,
1884 param->read_correct_mask, stop);
1889 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
1890 if (bit_chk & 1) {
1891 /* Remember a passing test as the
1892 left_edge */
1893 left_edge[i] = d;
1894 } else {
1895 /* If a left edge has not been seen yet,
1896 then a future passing test will mark
1897 this edge as the right edge */
1899 IO_IO_IN_DELAY_MAX + 1) {
1900 right_edge[i] = -(d + 1);
1903 bit_chk = bit_chk >> 1;
1908 /* Reset DQ delay chains to 0 */
1909 scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
1911 for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
1912 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
1913 %d right_edge[%u]: %d\n", __func__, __LINE__,
1914 i, left_edge[i], i, right_edge[i]);
1917 * Check for cases where we haven't found the left edge,
1918 * which makes our assignment of the right edge invalid.
1919 * Reset it to the illegal value.
1921 if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) && (
1922 right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
1923 right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
1924 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset \
1925 right_edge[%u]: %d\n", __func__, __LINE__,
1930 * Reset sticky bit (except for bits where we have seen
1931 * both the left and right edge).
1933 sticky_bit_chk = sticky_bit_chk << 1;
1934 if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) &&
1935 (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
1936 sticky_bit_chk = sticky_bit_chk | 1;
1943 /* Search for the right edge of the window for each bit */
1944 for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) {
1945 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
1946 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
1947 uint32_t delay = d + start_dqs_en;
1948 if (delay > IO_DQS_EN_DELAY_MAX)
1949 delay = IO_DQS_EN_DELAY_MAX;
1950 scc_mgr_set_dqs_en_delay(read_group, delay);
1952 scc_mgr_load_dqs(read_group);
1954 writel(0, &sdr_scc_mgr->update);
1957 * Stop searching when the read test doesn't pass AND when
1958 * we've seen a passing read on every bit.
1960 if (use_read_test) {
1961 stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
1962 read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
1965 rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
1968 bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
1969 (read_group - (write_group *
1970 RW_MGR_MEM_IF_READ_DQS_WIDTH /
1971 RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
1972 stop = (bit_chk == 0);
1974 sticky_bit_chk = sticky_bit_chk | bit_chk;
1975 stop = stop && (sticky_bit_chk == param->read_correct_mask);
1977 debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => %u == \
1978 %u && %u", __func__, __LINE__, d,
1979 sticky_bit_chk, param->read_correct_mask, stop);
1984 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
1985 if (bit_chk & 1) {
1986 /* Remember a passing test as
1987 the right_edge */
1988 right_edge[i] = d;
1989 } else {
1990 if (d != 0) {
1991 /* If a right edge has not been
1992 seen yet, then a future passing
1993 test will mark this edge as the
1995 if (right_edge[i] ==
1996 IO_IO_IN_DELAY_MAX + 1) {
1997 left_edge[i] = -(d + 1);
2000 /* d = 0 failed, but it passed
2001 when testing the left edge,
2002 so it must be marginal,
2003 record it */
2004 if (right_edge[i] ==
2005 IO_IO_IN_DELAY_MAX + 1 &&
2011 /* If a right edge has not been
2012 seen yet, then a future passing
2013 test will mark this edge as the
2015 else if (right_edge[i] ==
2016 IO_IO_IN_DELAY_MAX +
2018 left_edge[i] = -(d + 1);
2023 debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,\
2024 d=%u]: ", __func__, __LINE__, d);
2025 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ",
2026 (int)(bit_chk & 1), i, left_edge[i]);
2027 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2029 bit_chk = bit_chk >> 1;
2034 /* Check that all bits have a window */
2035 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
2036 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
2037 %d right_edge[%u]: %d", __func__, __LINE__,
2038 i, left_edge[i], i, right_edge[i]);
2039 if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) || (right_edge[i]
2040 == IO_IO_IN_DELAY_MAX + 1)) {
2042 * Restore delay chain settings before letting the loop
2043 * in rw_mgr_mem_calibrate_vfifo retry different
2044 * dqs/ck relationships.
2046 scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
2047 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2048 scc_mgr_set_dqs_en_delay(read_group,
2051 scc_mgr_load_dqs(read_group);
2052 writel(0, &sdr_scc_mgr->update);
2054 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to \
2055 find edge [%u]: %d %d", __func__, __LINE__,
2056 i, left_edge[i], right_edge[i]);
2057 if (use_read_test) {
2058 set_failing_group_stage(read_group *
2059 RW_MGR_MEM_DQ_PER_READ_DQS + i,
2061 CAL_SUBSTAGE_VFIFO_CENTER);
2063 set_failing_group_stage(read_group *
2064 RW_MGR_MEM_DQ_PER_READ_DQS + i,
2065 CAL_STAGE_VFIFO_AFTER_WRITES,
2066 CAL_SUBSTAGE_VFIFO_CENTER);
2072 /* Find middle of window for each DQ bit */
2073 mid_min = left_edge[0] - right_edge[0];
2075 for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
2076 mid = left_edge[i] - right_edge[i];
2077 if (mid < mid_min) {
2084 * -mid_min/2 represents the amount that we need to move DQS.
2085 * If mid_min is odd and positive we'll need to add one to
2086 * make sure the rounding in further calculations is correct
2087 * (always bias to the right), so just add 1 for all positive values.
2092 mid_min = mid_min / 2;
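/*
 * Rounding example for the bias described above: mid_min = 5 becomes
 * (5 + 1) / 2 = 3 rather than 2, so for positive values the DQS shift
 * always rounds toward the right.
 */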
2094 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
2095 __func__, __LINE__, mid_min, min_index);
2097 /* Determine the amount we can change DQS (which is -mid_min) */
2098 orig_mid_min = mid_min;
2099 new_dqs = start_dqs - mid_min;
2100 if (new_dqs > IO_DQS_IN_DELAY_MAX)
2101 new_dqs = IO_DQS_IN_DELAY_MAX;
2102 else if (new_dqs < 0)
2105 mid_min = start_dqs - new_dqs;
2106 debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
2109 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2110 if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
2111 mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
2112 else if (start_dqs_en - mid_min < 0)
2113 mid_min += start_dqs_en - mid_min;
2115 new_dqs = start_dqs - mid_min;
2117 debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d \
2118 new_dqs=%d mid_min=%d\n", start_dqs,
2119 IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
	/* Initialize data for export structures */
	dqs_margin = IO_IO_IN_DELAY_MAX + 1;
	dq_margin = IO_IO_IN_DELAY_MAX + 1;

	/* add delay to bring centre of all DQ windows to the same "level" */
	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		/* Use values before divide by 2 to reduce round off error */
		shift_dq = (left_edge[i] - right_edge[i] -
			(left_edge[min_index] - right_edge[min_index]))/2 +
			(orig_mid_min - mid_min);

		debug_cond(DLEVEL == 2,
			   "vfifo_center: before: shift_dq[%u]=%d\n",
			   i, shift_dq);

		addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET;
		temp_dq_in_delay1 = readl(addr + (p << 2));
		temp_dq_in_delay2 = readl(addr + (i << 2));

		/* Clamp the shift so the resulting delay stays in range. */
		if (shift_dq + (int32_t)temp_dq_in_delay1 >
			(int32_t)IO_IO_IN_DELAY_MAX) {
			shift_dq = (int32_t)IO_IO_IN_DELAY_MAX -
				   temp_dq_in_delay1;
		} else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) {
			shift_dq = -(int32_t)temp_dq_in_delay1;
		}
		debug_cond(DLEVEL == 2,
			   "vfifo_center: after: shift_dq[%u]=%d\n",
			   i, shift_dq);
		final_dq[i] = temp_dq_in_delay1 + shift_dq;
		scc_mgr_set_dq_in_delay(p, final_dq[i]);
		scc_mgr_load_dq(p);

		debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i,
			   left_edge[i] - shift_dq + (-mid_min),
			   right_edge[i] + shift_dq - (-mid_min));
		/* To determine values for export structures */
		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
			dq_margin = left_edge[i] - shift_dq + (-mid_min);

		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
	}
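	/*
	 * Worked example (illustrative values only): for a bit with
	 * left_edge = 12 and right_edge = 8, and a narrowest window of
	 * left_edge = 6 and right_edge = 6, shift_dq =
	 * ((12 - 8) - (6 - 6)) / 2 = 2, i.e. two extra input-delay taps
	 * line this bit's window centre up with the narrowest window's.
	 */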
	final_dqs = new_dqs;
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
		final_dqs_en = start_dqs_en - mid_min;

	/* Move DQS-en */
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
		scc_mgr_set_dqs_en_delay(read_group, final_dqs_en);
		scc_mgr_load_dqs(read_group);
	}

	/* Move DQS */
	scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs);
	scc_mgr_load_dqs(read_group);
	debug_cond(DLEVEL == 2,
		   "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d",
		   __func__, __LINE__, dq_margin, dqs_margin);

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied. Apply the update bit.
	 */
	writel(0, &sdr_scc_mgr->update);

	return (dq_margin >= 0) && (dqs_margin >= 0);
}
/*
 * calibrate the read valid prediction FIFO.
 *
 *  - read valid prediction will consist of finding a good DQS enable phase,
 *    DQS enable delay, DQS input phase, and DQS input delay.
 *  - we also do a per-bit deskew on the DQ lines.
 */
static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group,
					   uint32_t test_bgn)
{
	uint32_t p, d, rank_bgn, sr;
	uint32_t dtaps_per_ptap;
	uint32_t bit_chk;
	uint32_t grp_calibrated;
	uint32_t write_group, write_test_bgn;
	uint32_t failed_substage;

	debug("%s:%d: %u %u\n", __func__, __LINE__, read_group, test_bgn);

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_VFIFO);

	write_group = read_group;
	write_test_bgn = test_bgn;

	/* Determine number of delay taps for each phase tap. */
	dtaps_per_ptap = DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP,
				      IO_DELAY_PER_DQS_EN_DCHAIN_TAP) - 1;

	/* update info for sims */
	reg_file_set_group(read_group);

	grp_calibrated = 0;

	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
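	/*
	 * Example with hypothetical tap delays (not measured values): an OPA
	 * phase tap of 416 ps and a DQS-en delay-chain tap of 25 ps give
	 * dtaps_per_ptap = DIV_ROUND_UP(416, 25) - 1 = 16, so the d-loop
	 * below sweeps d = 0, 2, ..., 16.
	 */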
	for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) {
		/*
		 * In RLDRAMX we may be messing with the delay of pins in
		 * the same write group but outside of the current read
		 * group, but that's ok because we haven't calibrated the
		 * output side yet.
		 */
		if (d > 0) {
			scc_mgr_apply_group_all_out_delay_add_all_ranks(
								write_group, d);
		}

		for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0;
		     p++) {
			/* set a particular dqdqs phase */
			scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p);

			debug_cond(DLEVEL == 1,
				   "%s:%d calibrate_vfifo: g=%u p=%u d=%u\n",
				   __func__, __LINE__, read_group, p, d);

			/*
			 * Load up the patterns used by read calibration
			 * using current DQDQS phase.
			 */
			rw_mgr_mem_calibrate_read_load_patterns(0, 1);
			if (!(gbl->phy_debug_mode_flags &
			      PHY_DEBUG_DISABLE_GUARANTEED_READ)) {
				if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks
				    (read_group, 1, &bit_chk)) {
					debug_cond(DLEVEL == 1,
						   "%s:%d Guaranteed read test failed:",
						   __func__, __LINE__);
					debug_cond(DLEVEL == 1,
						   " g=%u p=%u d=%u\n",
						   read_group, p, d);
					break;
				}
			}

			/* case:56390 */
			grp_calibrated = 1;
			if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
			    (write_group, read_group, test_bgn)) {
				/*
				 * Read per-bit deskew can be done on a
				 * per shadow register basis.
				 */
				for (rank_bgn = 0, sr = 0;
				     rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
				     rank_bgn += NUM_RANKS_PER_SHADOW_REG,
				     ++sr) {
					/*
					 * Determine if this set of ranks
					 * should be skipped entirely.
					 */
					if (!param->skip_shadow_regs[sr]) {
						/*
						 * If doing read after write
						 * calibration, do not update
						 * FOM now - do it then.
						 */
						if (!rw_mgr_mem_calibrate_vfifo_center
						    (rank_bgn, write_group,
						     read_group, test_bgn,
						     1, 0)) {
							grp_calibrated = 0;
							failed_substage =
								CAL_SUBSTAGE_VFIFO_CENTER;
						}
					}
				}
			} else {
				grp_calibrated = 0;
				failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
			}
		}
	}

	if (grp_calibrated == 0) {
		set_failing_group_stage(write_group, CAL_STAGE_VFIFO,
					failed_substage);
		return 0;
	}

	/*
	 * Reset the delay chains back to zero if they have moved > 1
	 * (check for > 1 because the loop increments d even on a pass in
	 * the first case: a pass on the very first iteration still exits
	 * the loop with d == 2).
	 */
	if (d > 2)
		scc_mgr_zero_group(write_group, 1);

	return 1;
}
/* VFIFO Calibration -- Read Deskew Calibration after write deskew */
static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
					       uint32_t test_bgn)
{
	uint32_t rank_bgn, sr;
	uint32_t grp_calibrated;
	uint32_t write_group;

	debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	write_group = read_group;

	/* update info for sims */
	reg_file_set_group(read_group);

	grp_calibrated = 1;
	/* Read per-bit deskew can be done on a per shadow register basis */
	for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
		/* Determine if this set of ranks should be skipped entirely */
		if (!param->skip_shadow_regs[sr]) {
			/* This is the last calibration round, update FOM here */
			if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
							       write_group,
							       read_group,
							       test_bgn,
							       0, 1))
				grp_calibrated = 0;
		}
	}

	if (grp_calibrated == 0) {
		set_failing_group_stage(write_group,
					CAL_STAGE_VFIFO_AFTER_WRITES,
					CAL_SUBSTAGE_VFIFO_CENTER);
		return 0;
	}

	return 1;
}
/* Calibrate LFIFO to find smallest read latency */
static uint32_t rw_mgr_mem_calibrate_lfifo(void)
{
	uint32_t found_one;
	uint32_t bit_chk;

	debug("%s:%d\n", __func__, __LINE__);

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_LFIFO);
	reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);

	/* Load up the patterns used by read calibration for all ranks */
	rw_mgr_mem_calibrate_read_load_patterns(0, 1);
	found_one = 0;

	do {
		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
			   __func__, __LINE__, gbl->curr_read_lat);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
							      NUM_READ_TESTS,
							      PASS_ALL_BITS,
							      &bit_chk, 1))
			break;

		found_one = 1;
		/* reduce read latency and see if things are working */
		/* correctly */
		gbl->curr_read_lat--;
	} while (gbl->curr_read_lat > 0);

	/* reset the fifos to get pointers to known state */
	writel(0, &phy_mgr_cmd->fifo_reset);

	if (found_one) {
		/* add a fudge factor to the read latency that was determined */
		gbl->curr_read_lat += 2;
		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL == 2,
			   "%s:%d lfifo: success: using read_lat=%u\n",
			   __func__, __LINE__, gbl->curr_read_lat);
		return 1;
	} else {
		set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
					CAL_SUBSTAGE_READ_LATENCY);

		debug_cond(DLEVEL == 2,
			   "%s:%d lfifo: failed at initial read_lat=%u\n",
			   __func__, __LINE__, gbl->curr_read_lat);
		return 0;
	}
}
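/*
 * Worked example for the search above (illustrative values only): if reads
 * pass at latencies 10 down through 7 and fail at 6, the loop exits with
 * curr_read_lat == 6 and the +2 fudge factor sets the final latency to 8,
 * one cycle above the smallest passing value.
 */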
/*
 * issue write test command.
 * two variants are provided, one that just tests a write pattern and
 * another that tests datamask functionality.
 */
static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
						  uint32_t test_dm)
{
	uint32_t mcc_instruction;
	uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
				     ENABLE_SUPER_QUICK_CALIBRATION);
	uint32_t rw_wl_nop_cycles;
	uint32_t addr;

	/*
	 * Set counter and jump addresses for the right
	 * number of NOP cycles.
	 * The number of supported NOP cycles can range from -1 to infinity.
	 * Three different cases are handled:
	 *
	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
	 *    mechanism will be used to insert the right number of NOPs.
	 *
	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
	 *    issuing the write command will jump straight to the
	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
	 *    data (for RLD), skipping the NOP micro-instruction altogether.
	 *
	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
	 *    turned on in the same micro-instruction that issues the write
	 *    command. Then we need to directly jump to the micro-instruction
	 *    that sends out the data.
	 *
	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
	 *       (2 and 3). One jump-counter (0) is used to perform multiple
	 *       write-read operations.
	 *       one counter left to issue this command in "multiple-group" mode
	 */
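	/*
	 * Summary of the three cases handled below:
	 *   rw_wl_nop_cycles == -1: CNTR 2 = 0xFF, jump 2 -> DATA (the WL_1
	 *                           variant turns on DQS with the command);
	 *   rw_wl_nop_cycles ==  0: CNTR 2 = 0xFF, jump 2 -> DQS;
	 *   rw_wl_nop_cycles == N > 0: CNTR 2 = 0 (fall through), CNTR 3 =
	 *                           N - 1, jump 3 -> NOP; e.g. N = 4 loads
	 *                           load_cntr3 with 3.
	 */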
	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;

	/* Note: -1 is stored as all-ones in this unsigned variable. */
	if (rw_wl_nop_cycles == -1) {
		/*
		 * CNTR 2 - We want to execute the special write operation that
		 * turns on DQS right away and then skip directly to the
		 * instruction that sends out the data. We set the counter to a
		 * large number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	} else if (rw_wl_nop_cycles == 0) {
		/*
		 * CNTR 2 - We want to skip the NOP operation and go straight
		 * to the DQS enable instruction. We set the counter to a large
		 * number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		}
	} else {
		/*
		 * CNTR 2 - In this case we want to execute the next instruction
		 * and NOT take the jump. So we set the counter to 0. The jump
		 * address doesn't count.
		 */
		writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		/*
		 * CNTR 3 - Set the nop counter to the number of cycles we
		 * need to loop for, minus 1.
		 */
		writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	}

	writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
		  RW_MGR_RESET_READ_DATAPATH_OFFSET);

	if (quick_write_mode)
		writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
	else
		writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);

	writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);

	/*
	 * CNTR 1 - This is used to ensure enough time elapses
	 * for read data to come back.
	 */
	writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);

	if (test_dm) {
		writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	} else {
		writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(mcc_instruction, addr + (group << 2));
}
/* Test writes, can check for a single bit pass or multiple bit pass */
static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
	uint32_t *bit_chk, uint32_t all_ranks)
{
	uint32_t r;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t vg;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr_rw_mgr;
	uint32_t base_rw_mgr;

	*bit_chk = param->write_correct_mask;
	correct_mask_vg = param->write_correct_mask_vg;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		tmp_bit_chk = 0;
		addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);

			tmp_bit_chk = tmp_bit_chk <<
				(RW_MGR_MEM_DQ_PER_WRITE_DQS /
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
			rw_mgr_mem_calibrate_write_test_issue(write_group *
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg,
				use_dm);

			base_rw_mgr = readl(addr_rw_mgr);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2,
			   "write_test(%u,%u,ALL) : %u == %u => %lu",
			   write_group, use_dm, *bit_chk,
			   param->write_correct_mask,
			   (long unsigned int)(*bit_chk ==
			   param->write_correct_mask));
		return *bit_chk == param->write_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ",
			   write_group, use_dm, *bit_chk);
		debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0,
			   (long unsigned int)(*bit_chk != 0));
		return *bit_chk != 0x00;
	}
}
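/*
 * Worked example for the accumulation above (illustrative widths): with
 * 8 DQ bits per write DQS and 2 virtual groups, each virtual group
 * contributes 8 / 2 = 4 bits, so tmp_bit_chk is shifted left by 4 before
 * the next group's pass bits are ORed in. Because *bit_chk is ANDed with
 * tmp_bit_chk for every rank, a DQ bit only counts as passing if it
 * passed on all tested ranks.
 */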
/*
 * center all windows. do per-bit-deskew to possibly increase size of
 * certain windows.
 */
static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
	uint32_t write_group, uint32_t test_bgn)
{
	uint32_t i, p, min_index;
	int32_t d;
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	uint32_t bit_chk;
	uint32_t sticky_bit_chk;
	int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
	int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
	int32_t mid;
	int32_t mid_min, orig_mid_min;
	int32_t new_dqs, start_dqs, shift_dq;
	int32_t dq_margin, dqs_margin, dm_margin;
	uint32_t stop;
	uint32_t temp_dq_out1_delay;
	uint32_t addr;

	debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);

	dm_margin = 0;

	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
	start_dqs = readl(addr +
			  (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));

	/* per-bit deskew */

	/*
	 * set the left and right edge of each bit to an illegal value
	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
	 */
	sticky_bit_chk = 0;
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
		right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
	}
	/* Search for the left edge of the window for each bit */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) {
		scc_mgr_apply_group_dq_out1_delay(d);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the write test doesn't pass AND when
		 * we've seen a passing test on every bit.
		 */
		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
			0, PASS_ONE_BIT, &bit_chk, 0);
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->write_correct_mask);
		debug_cond(DLEVEL == 2,
			   "write_center(left): dtap=%d => %u == %u && %u [bit_chk= %u ]\n",
			   d, sticky_bit_chk, param->write_correct_mask,
			   stop, bit_chk);

		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
				if (bit_chk & 1) {
					/*
					 * Remember a passing test as the
					 * left edge.
					 */
					left_edge[i] = d;
				} else {
					/*
					 * If a left edge has not been seen
					 * yet, then a future passing test will
					 * mark this edge as the right edge.
					 */
					if (left_edge[i] ==
					    IO_IO_OUT1_DELAY_MAX + 1) {
						right_edge[i] = -(d + 1);
					}
				}
				debug_cond(DLEVEL == 2,
					   "write_center[l,d=%d]:", d);
				debug_cond(DLEVEL == 2,
					   "bit_chk_test=%d left_edge[%u]: %d",
					   (int)(bit_chk & 1), i, left_edge[i]);
				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n",
					   i, right_edge[i]);
				bit_chk = bit_chk >> 1;
			}
		}
	}
	/* Reset DQ delay chains to 0 */
	scc_mgr_apply_group_dq_out1_delay(0);
	sticky_bit_chk = 0;
	for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) {
		debug_cond(DLEVEL == 2,
			   "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d\n",
			   __func__, __LINE__, i, left_edge[i], i,
			   right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) &&
		    (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) {
			right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
			debug_cond(DLEVEL == 2,
				   "%s:%d write_center: reset right_edge[%u]: %d\n",
				   __func__, __LINE__, i, right_edge[i]);
		}

		/*
		 * Reset sticky bit (except for bits where we have
		 * seen the left edge).
		 */
		sticky_bit_chk = sticky_bit_chk << 1;
		if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1))
			sticky_bit_chk = sticky_bit_chk | 1;

		if (i == 0)
			break;
	}
	/* Search for the right edge of the window for each bit */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) {
		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
							d + start_dqs);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the write test doesn't pass AND when
		 * we've seen a passing test on every bit.
		 */
		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
			0, PASS_ONE_BIT, &bit_chk, 0);

		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->write_correct_mask);

		debug_cond(DLEVEL == 2,
			   "write_center (right): dtap=%u => %u == %u && %u\n",
			   d, sticky_bit_chk, param->write_correct_mask, stop);

		if (stop == 1) {
			if (d == 0) {
				for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS;
				     i++) {
					/* d = 0 failed, but it passed when
					testing the left edge, so it must be
					marginal, set it to -1 */
					if (right_edge[i] ==
					    IO_IO_OUT1_DELAY_MAX + 1 &&
					    left_edge[i] !=
					    IO_IO_OUT1_DELAY_MAX + 1) {
						right_edge[i] = -1;
					}
				}
			}
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
				if (bit_chk & 1) {
					/*
					 * Remember a passing test as
					 * the right edge.
					 */
					right_edge[i] = d;
				} else {
					if (d != 0) {
						/*
						 * If a right edge has not
						 * been seen yet, then a future
						 * passing test will mark this
						 * edge as the left edge.
						 */
						if (right_edge[i] ==
						    IO_IO_OUT1_DELAY_MAX + 1)
							left_edge[i] = -(d + 1);
					} else {
						/*
						 * d = 0 failed, but it passed
						 * when testing the left edge,
						 * so it must be marginal, set
						 * it to -1.
						 */
						if (right_edge[i] ==
						    IO_IO_OUT1_DELAY_MAX + 1 &&
						    left_edge[i] !=
						    IO_IO_OUT1_DELAY_MAX + 1)
							right_edge[i] = -1;
						/*
						 * If a right edge has not been
						 * seen yet, then a future
						 * passing test will mark this
						 * edge as the left edge.
						 */
						else if (right_edge[i] ==
							 IO_IO_OUT1_DELAY_MAX + 1)
							left_edge[i] = -(d + 1);
					}
				}

				debug_cond(DLEVEL == 2,
					   "write_center[r,d=%d]:", d);
				debug_cond(DLEVEL == 2,
					   "bit_chk_test=%d left_edge[%u]: %d",
					   (int)(bit_chk & 1), i, left_edge[i]);
				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n",
					   i, right_edge[i]);
				bit_chk = bit_chk >> 1;
			}
		}
	}
	/* Check that all bits have a window */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		debug_cond(DLEVEL == 2,
			   "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d",
			   __func__, __LINE__, i, left_edge[i], i,
			   right_edge[i]);
		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) ||
		    (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) {
			set_failing_group_stage(test_bgn + i,
						CAL_STAGE_WRITES,
						CAL_SUBSTAGE_WRITES_CENTER);
			return 0;
		}
	}
	/* Find middle of window for each DQ bit */
	mid_min = left_edge[0] - right_edge[0];
	min_index = 0;
	for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		mid = left_edge[i] - right_edge[i];
		if (mid < mid_min) {
			mid_min = mid;
			min_index = i;
		}
	}

	/*
	 * -mid_min/2 represents the amount that we need to move DQS.
	 * If mid_min is odd and positive we'll need to add one to
	 * make sure the rounding in further calculations is correct
	 * (always bias to the right), so just add 1 for all positive values.
	 */
	if (mid_min > 0)
		mid_min++;
	mid_min = mid_min / 2;
	debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
		   __LINE__, mid_min);

	/*
	 * Determine the amount we can change DQS (which is -mid_min).
	 * DQS is left at its starting value here; only the DQ delays are
	 * moved to centre the windows.
	 */
	orig_mid_min = mid_min;
	new_dqs = start_dqs;
	mid_min = 0;

	debug_cond(DLEVEL == 1,
		   "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
		   __func__, __LINE__, start_dqs, new_dqs, mid_min);
	/* Initialize data for export structures */
	dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
	dq_margin = IO_IO_OUT1_DELAY_MAX + 1;

	/* add delay to bring centre of all DQ windows to the same "level" */
	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
		/* Use values before divide by 2 to reduce round off error */
		shift_dq = (left_edge[i] - right_edge[i] -
			(left_edge[min_index] - right_edge[min_index]))/2 +
			(orig_mid_min - mid_min);

		debug_cond(DLEVEL == 2,
			   "%s:%d write_center: before: shift_dq[%u]=%d\n",
			   __func__, __LINE__, i, shift_dq);

		addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
		temp_dq_out1_delay = readl(addr + (i << 2));

		/* Clamp the shift so the resulting delay stays in range. */
		if (shift_dq + (int32_t)temp_dq_out1_delay >
			(int32_t)IO_IO_OUT1_DELAY_MAX) {
			shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX -
				   temp_dq_out1_delay;
		} else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
			shift_dq = -(int32_t)temp_dq_out1_delay;
		}
		debug_cond(DLEVEL == 2,
			   "write_center: after: shift_dq[%u]=%d\n",
			   i, shift_dq);
		scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq);
		scc_mgr_load_dq(i);

		debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
			   left_edge[i] - shift_dq + (-mid_min),
			   right_edge[i] + shift_dq - (-mid_min));
		/* To determine values for export structures */
		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
			dq_margin = left_edge[i] - shift_dq + (-mid_min);

		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
	}

	/* Move DQS */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
	writel(0, &sdr_scc_mgr->update);
	/* Centre DM */
	debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);

	/*
	 * set the left and right edge of each bit to an illegal value,
	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value,
	 */
	left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
	right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t win_best = 0;

	/* Search for the/part of the window with DM shift */
	for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
		scc_mgr_apply_group_dm_out1_delay(d);
		writel(0, &sdr_scc_mgr->update);

		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* Set current end of the window */
			end_curr = -d;
			/*
			 * If a starting edge of our window has not been seen
			 * this is our current start of the DM window.
			 */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = -d;

			/*
			 * If the current window is bigger than the best seen,
			 * make it the new best.
			 */
			if ((end_curr-bgn_curr+1) > win_best) {
				win_best = end_curr-bgn_curr+1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edge */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
		}
	}
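	/*
	 * Worked example (illustrative values only): if the write test
	 * passes for d = 30 down through d = 10 and fails below that,
	 * then bgn_curr = -30, end_curr = -10 and win_best =
	 * (-10) - (-30) + 1 = 21 taps.
	 */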
	/* Reset DM delay chains to 0 */
	scc_mgr_apply_group_dm_out1_delay(0);

	/*
	 * Check to see if the current window nudges up against 0 delay.
	 * If so we need to continue the search by shifting DQS, otherwise
	 * the DQS search begins as a new search.
	 */
	if (end_curr != 0) {
		bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
		end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	}
	/* Search for the/part of the window with DQS shifts */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
		/*
		 * Note: This only shifts DQS, so we may be limiting
		 * ourselves to the width of DQ unnecessarily.
		 */
		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
							d + new_dqs);

		writel(0, &sdr_scc_mgr->update);
		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* Set current end of the window */
			end_curr = d;
			/*
			 * If a beginning edge of our window has not been seen
			 * this is our current begin of the DM window.
			 */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = d;

			/*
			 * If the current window is bigger than the best seen,
			 * make it the new best.
			 */
			if ((end_curr-bgn_curr+1) > win_best) {
				win_best = end_curr-bgn_curr+1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edge */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;

			/*
			 * Early exit optimization: if the remaining delay
			 * chain space is less than the largest window
			 * already seen, we can stop searching.
			 */
			if ((win_best-1) >
			    (IO_IO_OUT1_DELAY_MAX - new_dqs - d)) {
				break;
			}
		}
	}
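	/*
	 * Early-exit example (illustrative values only): with win_best = 20,
	 * the loop stops as soon as the remaining delay-chain space
	 * IO_IO_OUT1_DELAY_MAX - new_dqs - d drops below win_best - 1 = 19,
	 * since no window found there could beat the best already seen.
	 */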
	/* assign left and right edge for cal and reporting */
	left_edge[0] = -1*bgn_best;
	right_edge[0] = end_best;

	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
		   __LINE__, left_edge[0], right_edge[0]);

	/* Move DQS (back to orig) */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);

	/* Move DM */

	/* Find middle of window for the DM bit */
	mid = (left_edge[0] - right_edge[0]) / 2;

	/* only move right, since we are not moving DQS/DQ */
	if (mid < 0)
		mid = 0;

	/* dm_margin should fail if we never find a window */
	if (win_best == 0)
		dm_margin = -1;
	else
		dm_margin = left_edge[0] - mid;

	scc_mgr_apply_group_dm_out1_delay(mid);
	writel(0, &sdr_scc_mgr->update);

	debug_cond(DLEVEL == 2,
		   "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
		   __func__, __LINE__, left_edge[0], right_edge[0], mid,
		   dm_margin);

	/* Export values */
	gbl->fom_out += dq_margin + dqs_margin;

	debug_cond(DLEVEL == 2,
		   "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
		   __func__, __LINE__, dq_margin, dqs_margin, dm_margin);

	/*
	 * Do not remove this line as it makes sure all of our
	 * decisions have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
}
/* calibrate the write operations */
static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
					    uint32_t test_bgn)
{
	/* update info for sims */
	debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn);

	reg_file_set_stage(CAL_STAGE_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);

	reg_file_set_group(g);

	if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) {
		set_failing_group_stage(g, CAL_STAGE_WRITES,
					CAL_SUBSTAGE_WRITES_CENTER);
		return 0;
	}

	return 1;
}
/**
 * mem_precharge_and_activate() - Precharge all banks and activate
 *
 * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
 */
static void mem_precharge_and_activate(void)
{
	int r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		/* Test if the rank should be skipped. */
		if (param->skip_ranks[r])
			continue;

		/* Set rank. */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* Precharge all banks. */
		writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					     RW_MGR_RUN_SINGLE_GROUP_OFFSET);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* Activate rows. */
		writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}
}
/**
 * mem_init_latency() - Configure memory RLAT and WLAT settings
 *
 * Configure memory RLAT and WLAT parameters.
 */
static void mem_init_latency(void)
{
	/*
	 * For AV/CV, LFIFO is hardened and always runs at full rate
	 * so max latency in AFI clocks, used here, is correspondingly
	 * smaller.
	 */
	const u32 max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
	u32 rlat, wlat;

	debug("%s:%d\n", __func__, __LINE__);

	/*
	 * Read in write latency.
	 * WL for Hard PHY does not include additive latency.
	 */
	wlat = readl(&data_mgr->t_wl_add);
	wlat += readl(&data_mgr->mem_t_add);

	gbl->rw_wl_nop_cycles = wlat - 1;

	/* Read in read latency. */
	rlat = readl(&data_mgr->t_rl_add);

	/* Set a pretty high read latency initially. */
	gbl->curr_read_lat = rlat + 16;
	if (gbl->curr_read_lat > max_latency)
		gbl->curr_read_lat = max_latency;

	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);

	/* Advertise write latency. */
	writel(wlat, &phy_mgr_cfg->afi_wlat);
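	/*
	 * Worked example (illustrative register values): t_wl_add = 5 and
	 * mem_t_add = 2 give wlat = 7, hence rw_wl_nop_cycles = 6; the
	 * initial read latency is rlat + 16, capped at max_latency.
	 */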
}

/**
 * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
 *
 * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
 */
static void mem_skip_calibrate(void)
{
	uint32_t vfifo_offset;
	uint32_t i, j, r;

	debug("%s:%d\n", __func__, __LINE__);
	/* Need to update every shadow register set used by the interface */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/*
		 * Set output phase alignment settings appropriate for
		 * skip calibration.
		 */
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			scc_mgr_set_dqs_en_phase(i, 0);
#if IO_DLL_CHAIN_LENGTH == 6
			scc_mgr_set_dqdqs_output_phase(i, 6);
#else
			scc_mgr_set_dqdqs_output_phase(i, 7);
#endif
			/*
			 * Case:33398
			 *
			 * Write data arrives to the I/O two cycles before write
			 * latency is reached (720 deg).
			 *   -> due to bit-slip in a/c bus
			 *   -> to allow board skew where dqs is longer than ck
			 *      -> how often can this happen!?
			 *      -> can claim back some ptaps for high freq
			 *         support if we can relax this, but i digress...
			 *
			 * The write_clk leads mem_ck by 90 deg.
			 * The minimum ptap of the OPA is 180 deg.
			 * Each ptap has (360 / IO_DLL_CHAIN_LENGTH) deg of delay.
			 * The write_clk is always delayed by 2 ptaps.
			 *
			 * Hence, to make DQS aligned to CK, we need to delay
			 * DQS by:
			 *   (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
			 *
			 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
			 * gives us the number of ptaps, which simplifies to:
			 *
			 *   (1.25 * IO_DLL_CHAIN_LENGTH - 2)
			 */
			scc_mgr_set_dqdqs_output_phase(i,
					1.25 * IO_DLL_CHAIN_LENGTH - 2);
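			/*
			 * Worked example: with IO_DLL_CHAIN_LENGTH = 8, each
			 * ptap is 360 / 8 = 45 deg, so
			 * (720 - 90 - 180 - 2 * 45) / 45 = 8 ptaps, matching
			 * 1.25 * 8 - 2 = 8.
			 */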
		}
		writel(0xff, &sdr_scc_mgr->dqs_ena);
		writel(0xff, &sdr_scc_mgr->dqs_io_ena);

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
				  SCC_MGR_GROUP_COUNTER_OFFSET);
		}
		writel(0xff, &sdr_scc_mgr->dq_ena);
		writel(0xff, &sdr_scc_mgr->dm_ena);
		writel(0, &sdr_scc_mgr->update);
	}

	/* Compensate for simulation model behaviour */
	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
		scc_mgr_set_dqs_bus_in_delay(i, 10);
		scc_mgr_load_dqs(i);
	}
	writel(0, &sdr_scc_mgr->update);

	/*
	 * ArriaV has hard FIFOs that can only be initialized by incrementing
	 * in sequencer.
	 */
	vfifo_offset = CALIB_VFIFO_OFFSET;
	for (j = 0; j < vfifo_offset; j++)
		writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
	writel(0, &phy_mgr_cmd->fifo_reset);

	/*
	 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
	 * setting from generation-time constant.
	 */
	gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
}
/**
 * mem_calibrate() - Memory calibration entry point.
 *
 * Perform memory calibration.
 */
static uint32_t mem_calibrate(void)
{
	uint32_t i;
	uint32_t rank_bgn, sr;
	uint32_t write_group, write_test_bgn;
	uint32_t read_group, read_test_bgn;
	uint32_t run_groups, current_run;
	uint32_t failing_groups = 0;
	uint32_t group_failed = 0;

	const u32 rwdqs_ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
				RW_MGR_MEM_IF_WRITE_DQS_WIDTH;

	debug("%s:%d\n", __func__, __LINE__);

	/* Initialize the data settings */
	gbl->error_substage = CAL_SUBSTAGE_NIL;
	gbl->error_stage = CAL_STAGE_NIL;
	gbl->error_group = 0xff;
	gbl->fom_in = 0;
	gbl->fom_out = 0;

	/* Initialize WLAT and RLAT. */
	mem_init_latency();

	/* Initialize bit slips. */
	mem_precharge_and_activate();

	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
		writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
			  SCC_MGR_GROUP_COUNTER_OFFSET);
		/* Only needed once to set all groups, pins, DQ, DQS, DM. */
		if (i == 0)
			scc_mgr_set_hhp_extras();

		scc_set_bypass_mode(i);
	}

	/* Calibration is skipped. */
	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
		/*
		 * Set VFIFO and LFIFO to instant-on settings in skip
		 * calibration mode.
		 */
		mem_skip_calibrate();

		/*
		 * Do not remove this line as it makes sure all of our
		 * decisions have been applied.
		 */
		writel(0, &sdr_scc_mgr->update);
		return 1;
	}
	/* Calibration is not skipped. */
	for (i = 0; i < NUM_CALIB_REPEAT; i++) {
		/*
		 * Zero all delay chain/phase settings for all
		 * groups and all shadow register sets.
		 */
		scc_mgr_zero_all();

		run_groups = ~param->skip_groups;

		for (write_group = 0, write_test_bgn = 0; write_group
		     < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
		     write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {

			/* Initialize the group failure */
			group_failed = 0;

			current_run = run_groups & ((1 <<
					RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
			run_groups = run_groups >>
					RW_MGR_NUM_DQS_PER_WRITE_GROUP;

			if (current_run == 0)
				continue;

			writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
					    SCC_MGR_GROUP_COUNTER_OFFSET);
			scc_mgr_zero_group(write_group, 0);

			for (read_group = write_group * rwdqs_ratio,
			     read_test_bgn = 0;
			     read_group < (write_group + 1) * rwdqs_ratio;
			     read_group++,
			     read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
					continue;

				/* Calibrate the VFIFO */
				if (rw_mgr_mem_calibrate_vfifo(read_group,
							       read_test_bgn))
					continue;

				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;

				/* The group failed, we're done. */
				goto grp_failed;
			}

			/* Calibrate the output side */
			for (rank_bgn = 0, sr = 0;
			     rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
			     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
					continue;

				/* Not needed in quick mode! */
				if (STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS)
					continue;

				/*
				 * Determine if this set of ranks
				 * should be skipped entirely.
				 */
				if (param->skip_shadow_regs[sr])
					continue;

				/* Calibrate WRITEs */
				if (rw_mgr_mem_calibrate_writes(rank_bgn,
						write_group, write_test_bgn))
					continue;

				group_failed = 1;
				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;
			}

			/* Some group failed, we're done. */
			if (group_failed)
				goto grp_failed;

			for (read_group = write_group * rwdqs_ratio,
			     read_test_bgn = 0;
			     read_group < (write_group + 1) * rwdqs_ratio;
			     read_group++,
			     read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
					continue;

				if (rw_mgr_mem_calibrate_vfifo_end(read_group,
								read_test_bgn))
					continue;

				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;

				/* The group failed, we're done. */
				goto grp_failed;
			}

			/* No group failed, continue as usual. */
			continue;

grp_failed:		/* A group failed, increment the counter. */
			failing_groups++;
		}

		/*
		 * If there are any failing groups then report
		 * the failure.
		 */
		if (failing_groups != 0)
			return 0;

		if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
			continue;

		/*
		 * If we're skipping groups as part of debug,
		 * don't calibrate LFIFO.
		 */
		if (param->skip_groups != 0)
			continue;

		/* Calibrate the LFIFO */
		if (!rw_mgr_mem_calibrate_lfifo())
			return 0;
	}

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return 1;
}
/**
 * run_mem_calibrate() - Perform memory calibration
 *
 * This function triggers the entire memory calibration procedure.
 */
static int run_mem_calibrate(void)
{
	int pass;

	debug("%s:%d\n", __func__, __LINE__);

	/* Reset pass/fail status shown on afi_cal_success/fail */
	writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);

	/* Stop tracking manager. */
	clrbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);

	phy_mgr_initialize();
	rw_mgr_mem_initialize();

	/* Perform the actual memory calibration. */
	pass = mem_calibrate();

	mem_precharge_and_activate();
	writel(0, &phy_mgr_cmd->fifo_reset);

	/* Handoff. */
	rw_mgr_mem_handoff();
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x2, &phy_mgr_cfg->mux_sel);

	/* Start tracking manager. */
	setbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);

	return pass;
}
/**
 * debug_mem_calibrate() - Report result of memory calibration
 * @pass:	Value indicating whether calibration passed or failed
 *
 * This function reports the results of the memory calibration
 * and writes debug information into the register file.
 */
static void debug_mem_calibrate(int pass)
{
	uint32_t debug_info;

	if (pass) {
		printf("%s: CALIBRATION PASSED\n", __FILE__);

		gbl->fom_in /= 2;
		gbl->fom_out /= 2;

		if (gbl->fom_in > 0xff)
			gbl->fom_in = 0xff;

		if (gbl->fom_out > 0xff)
			gbl->fom_out = 0xff;

		/* Update the FOM in the register file */
		debug_info = gbl->fom_in;
		debug_info |= gbl->fom_out << 8;
		writel(debug_info, &sdr_reg_file->fom);

		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
	} else {
		printf("%s: CALIBRATION FAILED\n", __FILE__);

		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;

		writel(debug_info, &sdr_reg_file->failing_stage);
		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);

		/* Update the failing group/stage in the register file */
		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;
		writel(debug_info, &sdr_reg_file->failing_stage);
	}

	printf("%s: Calibration complete\n", __FILE__);
}
/**
 * hc_initialize_rom_data() - Initialize ROM data
 *
 * Initialize ROM data.
 */
static void hc_initialize_rom_data(void)
{
	u32 i, addr;

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
	for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
		writel(inst_rom_init[i], addr + (i << 2));

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
	for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
		writel(ac_rom_init[i], addr + (i << 2));
}
/**
 * initialize_reg_file() - Initialize SDR register file
 *
 * Initialize SDR register file.
 */
static void initialize_reg_file(void)
{
	/* Initialize the register file with the correct data */
	writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
	writel(0, &sdr_reg_file->debug_data_addr);
	writel(0, &sdr_reg_file->cur_stage);
	writel(0, &sdr_reg_file->fom);
	writel(0, &sdr_reg_file->failing_stage);
	writel(0, &sdr_reg_file->debug1);
	writel(0, &sdr_reg_file->debug2);
}
/**
 * initialize_hps_phy() - Initialize HPS PHY
 *
 * Initialize HPS PHY.
 */
static void initialize_hps_phy(void)
{
	uint32_t reg;
	/*
	 * Tracking also gets configured here because it's in the
	 * same register.
	 */
	uint32_t trk_sample_count = 7500;
	uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
	/*
	 * Format is number of outer loops in the 16 MSB, sample
	 * count in 16 LSB.
	 */
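	/*
	 * E.g. (10 << 16) | 100 encodes 10 outer loops and 100 samples as
	 * 0x000a0064.
	 */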
	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
	/*
	 * This field selects the intrinsic latency to RDATA_EN/FULL path.
	 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
	 */
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
		trk_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl0);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
		trk_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
		trk_long_idle_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl1);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
		trk_long_idle_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
	writel(reg, &sdr_ctrl->phy_ctrl2);
}
/**
 * initialize_tracking() - Initialize tracking
 *
 * Initialize the register file with usable initial data.
 */
static void initialize_tracking(void)
{
	/*
	 * Initialize the register file with the correct data.
	 * Compute usable version of value in case we skip full
	 * computation later.
	 */
	writel(DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1,
	       &sdr_reg_file->dtaps_per_ptap);

	/* trk_sample_count */
	writel(7500, &sdr_reg_file->trk_sample_count);

	/* longidle outer loop [15:0] */
	writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);

	/*
	 * longidle sample count [31:24]
	 * trfc, worst case of 933Mhz 4Gb [23:16]
	 * trcd, worst case [15:8]
	 * vfifo wait [7:0]
	 */
	writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
	       &sdr_reg_file->delays);
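	/*
	 * For reference, the delays value above packs to
	 * (243 << 24) | (14 << 16) | (10 << 8) | (4 << 0) = 0xf30e0a04.
	 */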
	/* mux delay */
	writel((RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) |
	       (RW_MGR_SGLE_READ << 8) | (RW_MGR_PRECHARGE_ALL << 0),
	       &sdr_reg_file->trk_rw_mgr_addr);

	writel(RW_MGR_MEM_IF_READ_DQS_WIDTH,
	       &sdr_reg_file->trk_read_dqs_width);

	/* trefi [7:0] */
	writel((RW_MGR_REFRESH_ALL << 24) | (1000 << 0),
	       &sdr_reg_file->trk_rfsh);
}

int sdram_calibration_full(void)
{
	struct param_type my_param;
	struct gbl_type my_gbl;
	uint32_t pass;

	memset(&my_param, 0, sizeof(my_param));
	memset(&my_gbl, 0, sizeof(my_gbl));

	param = &my_param;
	gbl = &my_gbl;

	/* Enable the calibration report by default. */
	gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
	/*
	 * Only sweep all groups (regardless of fail state) by default.
	 * Set enabled read test by default.
	 */
#if DISABLE_GUARANTEED_READ
	gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
#endif
	/* Initialize the register file */
	initialize_reg_file();

	/* Initialize any PHY CSR */
	initialize_hps_phy();

	scc_mgr_initialize();

	initialize_tracking();

	printf("%s: Preparing to start memory calibration\n", __FILE__);

	debug("%s:%d\n", __func__, __LINE__);
	debug_cond(DLEVEL == 1,
		   "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
		   RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
		   RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
	debug_cond(DLEVEL == 1,
		   "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
		   RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
		   RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
		   IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
	debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
		   IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
	debug_cond(DLEVEL == 1,
		   "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
		   IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
		   IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
	debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
		   IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
		   IO_IO_OUT2_DELAY_MAX);
	debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
		   IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);

	hc_initialize_rom_data();

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_NIL);
	reg_file_set_group(0);

	/*
	 * Load global needed for those actions that require
	 * some dynamic calibration support.
	 */
	dyn_calib_steps = STATIC_CALIB_STEPS;
	/*
	 * Load global to allow dynamic selection of delay loop settings
	 * based on calibration mode.
	 */
	if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
		skip_delay_mask = 0xff;
	else
		skip_delay_mask = 0x0;

	pass = run_mem_calibrate();
	debug_mem_calibrate(pass);
	return pass;
}