/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier:	BSD-3-Clause
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include <errno.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"

static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)
		(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);

static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)
		(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);

static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;

static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);

static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;

static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);

static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;

static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define STATIC_IN_RTL_SIM		0
#define STATIC_SKIP_DELAY_LOOPS		0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values.
 *
 * The mask is set to include all bits when not-skipping, but is
 * zero when skipping.
 */

uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)
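
/*
 * Illustrative example: with skip_delay_mask == 0xffff,
 * SKIP_DELAY_LOOP_VALUE_OR_ZERO(0x20) evaluates to 0x20 and the delay
 * loops run as requested; with skip_delay_mask == 0 it evaluates to 0,
 * so the loop counters below are loaded with zero and the delays collapse.
 */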

struct gbl_type *gbl;
struct param_type *param;
uint32_t curr_shadow_reg;

static void set_failing_group_stage(uint32_t group, uint32_t stage,
				    uint32_t substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group
	 */
	if (gbl->error_stage == CAL_STAGE_NIL)	{
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

/**
 * phy_mgr_initialize() - Initialize PHY Manager
 *
 * Initialize PHY Manager.
 */
static void phy_mgr_initialize(void)
{
	u32 ratio;

	debug("%s:%d\n", __func__, __LINE__);
	/* Calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER memory clock is not stable; we begin initialization. */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER calibration status all set to zero */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	/* Init params only if we do NOT skip calibration. */
	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
		return;

	ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
		RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
	param->read_correct_mask_vg = (1 << ratio) - 1;
	param->write_correct_mask_vg = (1 << ratio) - 1;
	param->read_correct_mask = (1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
	param->write_correct_mask = (1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
	ratio = RW_MGR_MEM_DATA_WIDTH /
		RW_MGR_MEM_DATA_MASK_WIDTH;
	param->dm_correct_mask = (1 << ratio) - 1;
}

/**
 * set_rank_and_odt_mask() - Set Rank and ODT mask
 * @rank:	Rank mask
 * @odt_mode:	ODT mode, OFF or READ_WRITE
 *
 * Set Rank and ODT mask (On-Die Termination).
 */
static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
{
	u32 odt_mask_0 = 0;
	u32 odt_mask_1 = 0;
	u32 cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_OFF) {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	} else {	/* RW_MGR_ODT_MODE_READ_WRITE */
		switch (RW_MGR_MEM_NUMBER_OF_RANKS) {
		case 1:	/* 1 Rank */
			/* Read: ODT = 0 ; Write: ODT = 1 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
			break;
		case 2:	/* 2 Ranks */
			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
				/*
				 * - Dual-Slot , Single-Rank (1 CS per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they
				 * are both single rank with 2 CS each
				 * (special for RDIMM).
				 *
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * - Single-Slot , Dual-Rank (2 CS per DIMM)
				 *
				 * Read: Turn off ODT on all ranks
				 * Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
			break;
		case 4:	/* 4 Ranks */
			/* Read:
			 * ----------+-----------------------+
			 *           |         ODT           |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |         ODT           |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
			break;
		}
	}

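	/*
	 * Worked example (illustrative): with 4 ranks and rank == 1, a
	 * read uses odt_mask_0 = 0x8 (ODT on rank 3) and a write uses
	 * odt_mask_1 = 0xA (ODT on ranks 3 and 1), per the tables above.
	 * The register value built below packs the active-low CS mask
	 * into bits [7:0] and the ODT masks into [15:8] and [23:16].
	 */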
	cs_and_odt_mask = (0xFF & ~(1 << rank)) |
			  ((0xFF & odt_mask_0) << 8) |
			  ((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}

/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
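	/*
	 * Each group has its own 32-bit word inside the block selected by
	 * @off, hence the (grp << 2) byte offset; e.g. (illustrative)
	 * grp = 3 lands at byte offset off + 0xc.
	 */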
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}

/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, i, 0);
	}
}

static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
		    delay);
}

/* load up dqs config settings */
static void scc_mgr_load_dqs(uint32_t dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(uint32_t dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}

/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 * @update:	If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
{
	/*
	 * Although the h/w doesn't support different phases per
	 * shadow register, for simplicity our SCC manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match the h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
						     uint32_t phase)
{
	/*
	 * Although the h/w doesn't support different phases per
	 * shadow register, for simplicity our SCC manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match the h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
					       uint32_t delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * This function sets the OCT output delay in SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
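	/*
	 * E.g. (illustrative): with 4 read groups and 2 write groups,
	 * ratio = 2, so write group 1 updates read groups 2 and 3.
	 */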
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}

/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- DQS bypass
	 * bits: 1:1 = 1'b1	- DQ bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
			  (1 << 2) | (1 << 1) | (1 << 0);
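	/* With the fixed fields above, value evaluates to 0x27. */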
	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			 SCC_MGR_HHP_GLOBALS_OFFSET |
			 SCC_MGR_HHP_EXTRAS_OFFSET;

	debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
		   __func__, __LINE__);
	writel(value, addr);
	debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
		   __func__, __LINE__);
}

/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(void)
{
	int i, r;

	/*
	 * USER Zero all DQS config settings, across all groups and all
	 * shadow registers
	 */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* Arria V/Cyclone V don't have out2. */
			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
		}
	}

	/* Multicast to all DQS group enables. */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:	Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group:	Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}

/**
 * scc_mgr_zero_group() - Zero all configs for a group
 * @write_group:	Write group
 * @out_only:		Zero only output configs
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* Multicast to all DQ enables. */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings. */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
			scc_mgr_set_dm_out1_delay(i, 0);

		/* Multicast to all DM enables. */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* Zero all DQS IO settings. */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(0);

		/* Arria V/Cyclone V don't have out2. */
		scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_load_dqs_for_write_group(write_group);

		/* Multicast to all DQS IO enables (only 1 in total). */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* Hit update to zero everything. */
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}

/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
 * @delay:		Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
{
	int i;

	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
{
	int i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
						    uint32_t delay)
{
	scc_mgr_set_dqs_out1_delay(delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(write_group, delay);
	scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
 */
static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
						  const u32 delay)
{
	u32 i, new_delay;

	/* DQ shift */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
		scc_mgr_load_dq(i);

	/* DM shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
		scc_mgr_load_dm(i);

	/* DQS shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay, new_delay,
			   IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_dqs_out1_delay(new_delay);
	}

	scc_mgr_load_dqs_io();

	/* OCT shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay,
			   new_delay, IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_oct_out1_delay(write_group, new_delay);
	}

	scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the entire output side to all ranks
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
						const u32 delay)
{
	int r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(write_group, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}

/**
 * set_jump_as_return() - Return instruction optimization
 *
 * Optimization used to recover some slots in the ddr3 inst_rom; it could
 * be applied to other protocols if we wanted to.
 */
static void set_jump_as_return(void)
{
	/*
	 * To save space, we replace return with jump to special shared
	 * RETURN instruction, so we set the counter to a large value so
	 * that we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/**
 * delay_for_n_mem_clocks() - Delay for N memory clocks
 * @clocks:	Length of the delay
 *
 * Delay for N memory clocks.
 */
static void delay_for_n_mem_clocks(const u32 clocks)
{
	u32 afi_clocks;
	u16 c_loop;
	u8 inner;
	u8 outer;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* Scale (rounding up) to get afi clocks. */
	afi_clocks = DIV_ROUND_UP(clocks, AFI_RATE_RATIO);
	if (afi_clocks)	/* Temporary underflow protection */
		afi_clocks--;

	/*
	 * Note, we don't bother accounting for being off a little
	 * bit because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test
	 * before the decrement, and so always perform the loop
	 * 1 time more than the counter value
	 */
	c_loop = afi_clocks >> 16;
	outer = c_loop ? 0xff : (afi_clocks >> 8);
	inner = outer ? 0xff : afi_clocks;
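
	/*
	 * Worked example (illustrative, assuming AFI_RATE_RATIO == 2):
	 * clocks = 600 gives afi_clocks = 300 - 1 = 299 (0x12b), so
	 * c_loop = 0, outer = 1, inner = 0xff; the nested-loop branch
	 * below then runs (outer + 1) * (inner + 1) = 512 AFI cycles,
	 * comfortably covering the requested delay.
	 */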

	/*
	 * rom instructions are structured as follows:
	 *
	 * IDLE_LOOP2: jnz cntr0, TARGET_A
	 * IDLE_LOOP1: jnz cntr1, TARGET_B
	 *             return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
	 *
	 * a little confusing, but it helps save precious space in the inst_rom
	 * and sequencer rom and keeps the delays more accurate and reduces
	 * overhead
	 */
	if (afi_clocks < 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		do {
			writel(RW_MGR_IDLE_LOOP2,
			       SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} while (c_loop-- != 0);
	}
	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}

/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @cntr0:	Counter 0 value
 * @cntr1:	Counter 1 value
 * @cntr2:	Counter 2 value
 * @jump:	Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(jump, grpaddr);
}

/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
				 const int precharge)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		if (precharge)
			writel(RW_MGR_PRECHARGE_ALL, grpaddr);

		/*
		 * USER Use Mirror-ed commands for odd ranks if address
		 * mirroring is on
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(fin1, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			set_jump_as_return();
			writel(fin2, grpaddr);
		}

		if (precharge)
			continue;

		set_jump_as_return();
		writel(RW_MGR_ZQCL, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}

/**
 * rw_mgr_mem_initialize() - Initialize RW Manager
 *
 * Initialize RW Manager.
 */
static void rw_mgr_mem_initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcasted to all ranks */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how you load register for a loop
	 * Counters are located @ 0x800
	 * Jump address are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits
	 */

	/* Start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A
	 */
	rw_mgr_mem_init_load_regs(SEQ_TINIT_CNTR0_VAL, SEQ_TINIT_CNTR1_VAL,
				  SEQ_TINIT_CNTR2_VAL,
				  RW_MGR_INIT_RESET_0_CKE_0);
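
	/*
	 * Sanity check of the comment above (illustrative): with n = 0,
	 * a = 256, b = 106, ((2 + 0) * 256 + 2) * 106 = 54484 cycles,
	 * just over the ~54000 cycles needed for 200us at 3.75ns.
	 */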

	/* Indicate that memory is stable. */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	/*
	 * transition the RESET to high
	 * Wait for 500us
	 */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
	 * b = FF
	 */
	rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
				  SEQ_TRESET_CNTR2_VAL,
				  RW_MGR_INIT_RESET_1_CKE_0);
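
	/*
	 * Likewise (illustrative): n = 2, a = 131, b = 256 gives
	 * ((2 + 2) * 131 + 2) * 256 = 134656 cycles >= ~134000.
	 */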

	/* Bring up clock enable. */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);

	rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
			     0);
}

/**
 * rw_mgr_mem_handoff() - Hand off the memory to user
 *
 * At the end of calibration we have to program the user settings in
 * and hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
	/*
	 * Need to wait tMOD (12CK or 15ns) time before issuing other
	 * commands, but we will have plenty of NIOS cycles before actual
	 * handoff, so it's okay.
	 */
}

/**
 * rw_mgr_mem_calibrate_write_test_issue() - Issue write test command
 * @group:	Write Group
 * @test_dm:	If 1, test the datamask functionality as well
 *
 * Issue write test command. Two variants are provided, one that just tests
 * a write pattern and another that tests datamask functionality.
 */
static void rw_mgr_mem_calibrate_write_test_issue(u32 group,
						  u32 test_dm)
{
	const u32 quick_write_mode =
		(STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) &&
		ENABLE_SUPER_QUICK_CALIBRATION;
	u32 mcc_instruction;
	u32 rw_wl_nop_cycles;

	/*
	 * Set counter and jump addresses for the right
	 * number of NOP cycles.
	 * The number of supported NOP cycles can range from -1 to infinity
	 * Three different cases are handled:
	 *
	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
	 *    mechanism will be used to insert the right number of NOPs
	 *
	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
	 *    issuing the write command will jump straight to the
	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
	 *    data (for RLD), skipping the NOP micro-instruction altogether
	 *
	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
	 *    turned on in the same micro-instruction that issues the write
	 *    command. Then we need to directly jump to the micro-instruction
	 *    that sends out the data
	 *
	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
	 *       (2 and 3). One jump-counter (0) is used to perform multiple
	 *       write-read operations. This leaves one counter to issue this
	 *       command in "multiple-group" mode
	 */

	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;

	if (rw_wl_nop_cycles == -1) {
		/*
		 * CNTR 2 - We want to execute the special write operation that
		 * turns on DQS right away and then skip directly to the
		 * instruction that sends out the data. We set the counter to a
		 * large number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	} else if (rw_wl_nop_cycles == 0) {
		/*
		 * CNTR 2 - We want to skip the NOP operation and go straight
		 * to the DQS enable instruction. We set the counter to a large
		 * number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		}
	} else {
		/*
		 * CNTR 2 - In this case we want to execute the next instruction
		 * and NOT take the jump. So we set the counter to 0. The jump
		 * address doesn't count.
		 */
		writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		/*
		 * CNTR 3 - Set the nop counter to the number of cycles we
		 * need to loop for, minus 1.
		 */
		writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	}

	writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
		  RW_MGR_RESET_READ_DATAPATH_OFFSET);

	if (quick_write_mode)
		writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
	else
		writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);

	writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);

	/*
	 * CNTR 1 - This is used to ensure enough time elapses
	 * for read data to come back.
	 */
	writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);

	if (test_dm) {
		writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	} else {
		writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	}

	writel(mcc_instruction, (SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_RUN_SINGLE_GROUP_OFFSET) +
				(group << 2));
}

/**
 * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple pass
 * @rank_bgn:		Rank number
 * @write_group:	Write Group
 * @use_dm:		Use DM
 * @all_correct:	All bits must be correct in the mask
 * @bit_chk:		Resulting bit mask after the test
 * @all_ranks:		Test all ranks
 *
 * Test writes, can check for a single bit pass or multiple bit pass.
 */
static int
rw_mgr_mem_calibrate_write_test(const u32 rank_bgn, const u32 write_group,
				const u32 use_dm, const u32 all_correct,
				u32 *bit_chk, const u32 all_ranks)
{
	const u32 rank_end = all_ranks ?
				RW_MGR_MEM_NUMBER_OF_RANKS :
				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	const u32 shift_ratio = RW_MGR_MEM_DQ_PER_WRITE_DQS /
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS;
	const u32 correct_mask_vg = param->write_correct_mask_vg;

	u32 tmp_bit_chk, base_rw_mgr;
	int vg, r;

	*bit_chk = param->write_correct_mask;

	for (r = rank_bgn; r < rank_end; r++) {
		/* Request to skip the rank */
		if (param->skip_ranks[r])
			continue;

		/* Set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS - 1;
		     vg >= 0; vg--) {
			/* Reset the FIFOs to get pointers to known state. */
			writel(0, &phy_mgr_cmd->fifo_reset);

			rw_mgr_mem_calibrate_write_test_issue(
				write_group *
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS + vg,
				use_dm);

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
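			/*
			 * Accumulate per-virtual-group results: shift
			 * previous groups up and merge in this group's
			 * pass bits (a cleared RW manager bit means the
			 * bit passed, hence the inversion below).
			 */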
			tmp_bit_chk <<= shift_ratio;
			tmp_bit_chk |= (correct_mask_vg & ~(base_rw_mgr));
		}

		*bit_chk &= tmp_bit_chk;
	}

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2,
			   "write_test(%u,%u,ALL) : %u == %u => %i\n",
			   write_group, use_dm, *bit_chk,
			   param->write_correct_mask,
			   *bit_chk == param->write_correct_mask);
		return *bit_chk == param->write_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2,
			   "write_test(%u,%u,ONE) : %u != %i => %i\n",
			   write_group, use_dm, *bit_chk, 0, *bit_chk != 0);
		return *bit_chk != 0x00;
	}
}

/**
 * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
 * @rank_bgn:	Rank number
 * @group:	Read/Write Group
 * @all_ranks:	Test all ranks
 *
 * Performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works.
 */
static int
rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
					const u32 all_ranks)
{
	const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	const u32 addr_offset =
			 (group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS) << 2;
	const u32 rank_end = all_ranks ?
				RW_MGR_MEM_NUMBER_OF_RANKS :
				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	const u32 shift_ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
	const u32 correct_mask_vg = param->read_correct_mask_vg;

	u32 tmp_bit_chk, base_rw_mgr, bit_chk;
	int vg, r;
	int ret = 0;

	bit_chk = param->read_correct_mask;

	for (r = rank_bgn; r < rank_end; r++) {
		/* Request to skip the rank */
		if (param->skip_ranks[r])
			continue;

		/* Set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst of read commands */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_GUARANTEED_READ,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_GUARANTEED_READ_CONT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;
		     vg >= 0; vg--) {
			/* Reset the FIFOs to get pointers to known state. */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);
			writel(RW_MGR_GUARANTEED_READ,
			       addr + addr_offset + (vg << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk <<= shift_ratio;
			tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
		}

		bit_chk &= tmp_bit_chk;
	}

	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);

	if (bit_chk != param->read_correct_mask)
		ret = -EIO;

	debug_cond(DLEVEL == 1,
		   "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
		   __func__, __LINE__, group, bit_chk,
		   param->read_correct_mask, ret);

	return ret;
}

/**
 * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
 * @rank_bgn:	Rank number
 * @all_ranks:	Test all ranks
 *
 * Load up the patterns we are going to use during a read test.
 */
static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
						    const int all_ranks)
{
	const u32 rank_end = all_ranks ?
			RW_MGR_MEM_NUMBER_OF_RANKS :
			(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	u32 r;

	debug("%s:%d\n", __func__, __LINE__);

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up constant bursts */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}

/**
 * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
 * @rank_bgn:		Rank number
 * @group:		Read/Write group
 * @num_tries:		Number of retries of the test
 * @all_correct:	All bits must be correct in the mask
 * @bit_chk:		Resulting bit mask after the test
 * @all_groups:		Test all R/W groups
 * @all_ranks:		Test all ranks
 *
 * Try a read and see if it returns correct data back. Test has dummy reads
 * inserted into the mix used to align DQS enable. Test has more thorough
 * checks than the regular read test.
 */
static int
rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
			       const u32 num_tries, const u32 all_correct,
			       u32 *bit_chk,
			       const u32 all_groups, const u32 all_ranks)
{
	const u32 rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	const u32 quick_read_mode =
		((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
		 ENABLE_SUPER_QUICK_CALIBRATION);
	u32 correct_mask_vg = param->read_correct_mask_vg;
	u32 tmp_bit_chk;
	u32 base_rw_mgr;
	u32 addr;

	int r, vg, ret;

	*bit_chk = param->read_correct_mask;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_READ_B2B_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(RW_MGR_READ_B2B_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		if (quick_read_mode)
			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
			/* need at least two (1+1) reads to capture failures */
		else if (all_groups)
			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
		else
			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
		if (all_groups)
			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; vg >= 0;
		     vg--) {
			/* Reset the FIFOs to get pointers to known state. */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			if (all_groups) {
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
			} else {
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
			}

			writel(RW_MGR_READ_B2B, addr +
			       ((group *
				 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk <<= RW_MGR_MEM_DQ_PER_READ_DQS /
					RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
			tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
		}

		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);

	if (all_correct) {
		ret = (*bit_chk == param->read_correct_mask);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   param->read_correct_mask, ret);
	} else {
		ret = (*bit_chk != 0x00);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   0, ret);
	}

	return ret;
}

/**
 * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
 * @grp:		Read/Write group
 * @num_tries:		Number of retries of the test
 * @all_correct:	All bits must be correct in the mask
 * @all_groups:		Test all R/W groups
 *
 * Perform a READ test across all memory ranks.
 */
static int
rw_mgr_mem_calibrate_read_test_all_ranks(const u32 grp, const u32 num_tries,
					 const u32 all_correct,
					 const u32 all_groups)
{
	u32 bit_chk;

	return rw_mgr_mem_calibrate_read_test(0, grp, num_tries, all_correct,
					      &bit_chk, all_groups, 1);
}

/**
 * rw_mgr_incr_vfifo() - Increase VFIFO value
 * @grp:	Read/Write group
 *
 * Increase VFIFO value.
 */
static void rw_mgr_incr_vfifo(const u32 grp)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
}

/**
 * rw_mgr_decr_vfifo() - Decrease VFIFO value
 * @grp:	Read/Write group
 *
 * Decrease VFIFO value.
 */
static void rw_mgr_decr_vfifo(const u32 grp)
{
	u32 i;

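	/*
	 * The VFIFO pointer only advances, so stepping back one position
	 * is done by incrementing VFIFO_SIZE - 1 times and wrapping around.
	 */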
	for (i = 0; i < VFIFO_SIZE - 1; i++)
		rw_mgr_incr_vfifo(grp);
}

/**
 * find_vfifo_failing_read() - Push VFIFO to get a failing read
 * @grp:	Read/Write group
 *
 * Push VFIFO until a failing read happens.
 */
static int find_vfifo_failing_read(const u32 grp)
{
	u32 v, ret, fail_cnt = 0;

	for (v = 0; v < VFIFO_SIZE; v++) {
		debug_cond(DLEVEL == 2, "%s:%d: vfifo %u\n",
			   __func__, __LINE__, v);
		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							       PASS_ONE_BIT, 0);
		if (!ret) {
			fail_cnt++;

			if (fail_cnt == 2)
				return v;
		}

		/* Fiddle with FIFO. */
		rw_mgr_incr_vfifo(grp);
	}

	/* No failing read found! Something must have gone wrong. */
	debug_cond(DLEVEL == 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
	return 0;
}

/**
 * sdr_find_phase_delay() - Find DQS enable phase or delay
 * @working:	If 1, look for working phase/delay, if 0, look for non-working
 * @delay:	If 1, look for delay, if 0, look for phase
 * @grp:	Read/Write group
 * @work:	Working window position
 * @work_inc:	Working window increment
 * @pd:		DQS Phase/Delay Iterator
 *
 * Find working or non-working DQS enable phase setting.
 */
static int sdr_find_phase_delay(int working, int delay, const u32 grp,
				u32 *work, const u32 work_inc, u32 *pd)
{
	const u32 max = delay ? IO_DQS_EN_DELAY_MAX : IO_DQS_EN_PHASE_MAX;
	u32 ret;

	for (; *pd <= max; (*pd)++) {
		if (delay)
			scc_mgr_set_dqs_en_delay_all_ranks(grp, *pd);
		else
			scc_mgr_set_dqs_en_phase_all_ranks(grp, *pd);

		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							       PASS_ONE_BIT, 0);
		if (!working)
			ret = !ret;

		if (ret)
			return 0;

		if (work)
			*work += work_inc;
	}

	return -EINVAL;
}

/**
 * sdr_find_phase() - Find DQS enable phase
 * @working:	If 1, look for working phase, if 0, look for non-working phase
 * @grp:	Read/Write group
 * @work:	Working window position
 * @i:		Iterator
 * @p:		DQS Phase Iterator
 *
 * Find working or non-working DQS enable phase setting.
 */
static int sdr_find_phase(int working, const u32 grp, u32 *work,
			  u32 *i, u32 *p)
{
	const u32 end = VFIFO_SIZE + (working ? 0 : 1);
	int ret;

	for (; *i < end; (*i)++) {
		if (working)
			*p = 0;

		ret = sdr_find_phase_delay(working, 0, grp, work,
					   IO_DELAY_PER_OPA_TAP, p);
		if (!ret)
			return 0;

		if (*p > IO_DQS_EN_PHASE_MAX) {
			/* Fiddle with FIFO. */
			rw_mgr_incr_vfifo(grp);
			if (!working)
				*p = 0;
		}
	}

	return -EINVAL;
}

/**
 * sdr_working_phase() - Find working DQS enable phase
 * @grp:	Read/Write group
 * @work_bgn:	Working window start position
 * @d:		dtaps output value
 * @p:		DQS Phase Iterator
 * @i:		Iterator
 *
 * Find working DQS enable phase setting.
 */
static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
			     u32 *p, u32 *i)
{
	const u32 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP /
				   IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	int ret;

	*work_bgn = 0;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
		*i = 0;
		scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
		ret = sdr_find_phase(1, grp, work_bgn, i, p);
		if (!ret)
			return 0;
		*work_bgn += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}

	/* Cannot find working solution */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
		   __func__, __LINE__);

	return -EINVAL;
}

/**
 * sdr_backup_phase() - Find DQS enable backup phase
 * @grp:	Read/Write group
 * @work_bgn:	Working window start position
 * @p:		DQS Phase Iterator
 *
 * Find DQS enable backup phase setting.
 */
static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
{
	u32 tmp_delay, d;
	int ret;

	/* Special case code for backing up a phase */
	if (*p == 0) {
		*p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp);
	} else {
		(*p)--;
	}
	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
	scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);

	for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn; d++) {
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							       PASS_ONE_BIT, 0);
		if (ret) {
			*work_bgn = tmp_delay;
			break;
		}

		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}

	/* Restore VFIFO to old state before we decremented it (if needed). */
	(*p)++;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
}

/**
 * sdr_nonworking_phase() - Find non-working DQS enable phase
 * @grp:	Read/Write group
 * @work_end:	Working window end position
 * @p:		DQS Phase Iterator
 * @i:		Iterator
 *
 * Find non-working DQS enable phase setting.
 */
static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i)
{
	int ret;

	(*p)++;
	*work_end += IO_DELAY_PER_OPA_TAP;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		/* Fiddle with FIFO. */
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	ret = sdr_find_phase(0, grp, work_end, i, p);
	if (ret) {
		/* Cannot see edge of failing read. */
		debug_cond(DLEVEL == 2, "%s:%d: end: failed\n",
			   __func__, __LINE__);
	}

	return ret;
}

/**
 * sdr_find_window_center() - Find center of the working DQS window.
 * @grp:	Read/Write group
 * @work_bgn:	First working settings
 * @work_end:	Last working settings
 *
 * Find center of the working DQS enable window.
 */
static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
				  const u32 work_end)
{
	u32 work_mid;
	int tmp_delay = 0;
	int i, p, d;

	work_mid = (work_bgn + work_end) / 2;

	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
		   work_bgn, work_end, work_mid);
	/* Get the middle delay to be less than a VFIFO delay */
	tmp_delay = (IO_DQS_EN_PHASE_MAX + 1) * IO_DELAY_PER_OPA_TAP;

	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
	work_mid %= tmp_delay;
	debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);

	tmp_delay = rounddown(work_mid, IO_DELAY_PER_OPA_TAP);
	if (tmp_delay > IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP)
		tmp_delay = IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP;
	p = tmp_delay / IO_DELAY_PER_OPA_TAP;

	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);

	d = DIV_ROUND_UP(work_mid - tmp_delay, IO_DELAY_PER_DQS_EN_DCHAIN_TAP);
	if (d > IO_DQS_EN_DELAY_MAX)
		d = IO_DQS_EN_DELAY_MAX;
	tmp_delay += d * IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);

	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
	scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
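
	/*
	 * Worked example (illustrative): if work_mid corresponds to 2.5
	 * phase taps' worth of delay, p = 2 and the remaining half ptap
	 * is made up with d delay-chain taps, rounded up.
	 */
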
	/*
	 * push vfifo until we can successfully calibrate. We can do this
	 * because the largest possible margin is 1 VFIFO cycle.
	 */
	for (i = 0; i < VFIFO_SIZE; i++) {
		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center\n");
		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							     PASS_ONE_BIT,
							     0)) {
			debug_cond(DLEVEL == 2,
				   "%s:%d center: found: ptap=%u dtap=%u\n",
				   __func__, __LINE__, p, d);
			return 0;
		}

		/* Fiddle with FIFO. */
		rw_mgr_incr_vfifo(grp);
	}

	debug_cond(DLEVEL == 2, "%s:%d center: failed.\n",
		   __func__, __LINE__);

	return -EINVAL;
}

/**
 * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to use
 * @grp:	Read/Write Group
 *
 * Find a good DQS enable to use.
 */
static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
{
	u32 d, p, i;
	u32 dtaps_per_ptap;
	u32 work_bgn, work_end;
	u32 found_passing_read, found_failing_read, initial_failing_dtap;
	int ret;

	debug("%s:%d %u\n", __func__, __LINE__, grp);

	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);

	/* Step 0: Determine number of delay taps for each phase tap. */
	dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	/* Step 1: First push vfifo until we get a failing read. */
	find_vfifo_failing_read(grp);

	/* Step 2: Find first working phase, increment in ptaps. */
	work_bgn = 0;
	ret = sdr_working_phase(grp, &work_bgn, &d, &p, &i);
	if (ret)
		return ret;

	work_end = work_bgn;

	/*
	 * If d is 0 then the working window covers a phase tap and we can
	 * follow the old procedure. Otherwise, we've found the beginning
	 * and we need to increment the dtaps until we find the end.
	 */
	if (d == 0) {
		/*
		 * Step 3a: If we have room, back off by one and
		 *          increment in dtaps.
		 */
		sdr_backup_phase(grp, &work_bgn, &p);

		/*
		 * Step 4a: go forward from working phase to non working
		 * phase, increment in ptaps.
		 */
		ret = sdr_nonworking_phase(grp, &work_end, &p, &i);
		if (ret)
			return ret;

		/* Step 5a: Back off one from last, increment in dtaps. */

		/* Special case code for backing up a phase */
		if (p == 0) {
			p = IO_DQS_EN_PHASE_MAX;
			rw_mgr_decr_vfifo(grp);
		} else {
			p = p - 1;
		}

		work_end -= IO_DELAY_PER_OPA_TAP;
		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

		d = 0;

		debug_cond(DLEVEL == 2, "%s:%d p: ptap=%u\n",
			   __func__, __LINE__, p);
	}

	/* The dtap increment to find the failing edge is done here. */
	sdr_find_phase_delay(0, 1, grp, &work_end,
			     IO_DELAY_PER_DQS_EN_DCHAIN_TAP, &d);

	/* Go back to working dtap */
	if (d != 0)
		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2,
		   "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
		   __func__, __LINE__, p, d - 1, work_end);

	if (work_end < work_bgn) {
		/* nil range */
		debug_cond(DLEVEL == 2, "%s:%d end-2: failed\n",
			   __func__, __LINE__);
		return -EINVAL;
	}

	debug_cond(DLEVEL == 2, "%s:%d found range [%u,%u]\n",
		   __func__, __LINE__, work_bgn, work_end);

	/*
	 * We need to calculate the number of dtaps that equal a ptap.
	 * To do that we'll back up a ptap and re-find the edge of the
	 * window using dtaps
	 */
	debug_cond(DLEVEL == 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
		   __func__, __LINE__);

	/* Special case code for backing up a phase */
	if (p == 0) {
		p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp);
		debug_cond(DLEVEL == 2, "%s:%d backedup cycle/phase: p=%u\n",
			   __func__, __LINE__, p);
	} else {
		p = p - 1;
		debug_cond(DLEVEL == 2, "%s:%d backedup phase only: p=%u",
			   __func__, __LINE__, p);
	}

	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

	/*
	 * Increase dtap until we first see a passing read (in case the
	 * window is smaller than a ptap), and then a failing read to
	 * mark the edge of the window again.
	 */

	/* Find a passing read. */
	debug_cond(DLEVEL == 2, "%s:%d find passing read\n",
		   __func__, __LINE__);

	initial_failing_dtap = d;

	found_passing_read = !sdr_find_phase_delay(1, 1, grp, NULL, 0, &d);
	if (found_passing_read) {
		/* Find a failing read. */
		debug_cond(DLEVEL == 2, "%s:%d find failing read\n",
			   __func__, __LINE__);
		d++;
		found_failing_read = !sdr_find_phase_delay(0, 1, grp, NULL, 0,
							   &d);
	} else {
		debug_cond(DLEVEL == 1,
			   "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
			   __func__, __LINE__);
		found_failing_read = 0;
	}

	/*
	 * The dynamically calculated dtaps_per_ptap is only valid if we
	 * found a passing/failing read. If we didn't, it means d hit the max
	 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
	 * statically calculated value.
	 */
	if (found_passing_read && found_failing_read)
		dtaps_per_ptap = d - initial_failing_dtap;

	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	debug_cond(DLEVEL == 2, "%s:%d dtaps_per_ptap=%u - %u = %u",
		   __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);

	/* Step 6: Find the centre of the window. */
	ret = sdr_find_window_center(grp, work_bgn, work_end);

	return ret;
}

/**
 * search_stop_check() - Check if the detected edge is valid
 * @write:		Perform read (Stage 2) or write (Stage 3) calibration
 * @d:			DQS delay value
 * @rank_bgn:		Rank number
 * @write_group:	Write Group
 * @read_group:		Read Group
 * @bit_chk:		Resulting bit mask after the test
 * @sticky_bit_chk:	Resulting sticky bit mask after the test
 * @use_read_test:	Perform read test
 *
 * Test if the found edge is valid.
 */
static u32 search_stop_check(const int write, const int d, const int rank_bgn,
			     const u32 write_group, const u32 read_group,
			     u32 *bit_chk, u32 *sticky_bit_chk,
			     const u32 use_read_test)
{
	const u32 ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const u32 correct_mask = write ? param->write_correct_mask :
					 param->read_correct_mask;
	const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
				    RW_MGR_MEM_DQ_PER_READ_DQS;
	u32 ret;
	/*
	 * Stop searching when the read test doesn't pass AND when
	 * we've seen a passing read on every bit.
	 */
	if (write) {			/* WRITE-ONLY */
		ret = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
						       0, PASS_ONE_BIT,
						       bit_chk, 0);
	} else if (use_read_test) {	/* READ-ONLY */
		ret = !rw_mgr_mem_calibrate_read_test(rank_bgn, read_group,
						      NUM_READ_PB_TESTS,
						      PASS_ONE_BIT, bit_chk,
						      0, 0);
	} else {			/* READ-ONLY */
		rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0,
						PASS_ONE_BIT, bit_chk, 0);
		*bit_chk = *bit_chk >> (per_dqs *
			(read_group - (write_group * ratio)));
		ret = (*bit_chk == 0);
	}

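	/*
	 * Accumulate bits that have passed at least once; the search only
	 * stops once the current test fails and every bit has passed at
	 * some earlier delay setting.
	 */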
	*sticky_bit_chk = *sticky_bit_chk | *bit_chk;
	ret = ret && (*sticky_bit_chk == correct_mask);
	debug_cond(DLEVEL == 2,
		   "%s:%d center(left): dtap=%u => %u == %u && %u",
		   __func__, __LINE__, d,
		   *sticky_bit_chk, correct_mask, ret);

	return ret;
}

/**
 * search_left_edge() - Find left edge of DQ/DQS working phase
 * @write:		Perform read (Stage 2) or write (Stage 3) calibration
 * @rank_bgn:		Rank number
 * @write_group:	Write Group
 * @read_group:		Read Group
 * @test_bgn:		Rank number to begin the test
 * @sticky_bit_chk:	Resulting sticky bit mask after the test
 * @left_edge:		Left edge of the DQ/DQS phase
 * @right_edge:		Right edge of the DQ/DQS phase
 * @use_read_test:	Perform read test
 *
 * Find left edge of DQ/DQS working phase.
 */
static void search_left_edge(const int write, const int rank_bgn,
	const u32 write_group, const u32 read_group, const u32 test_bgn,
	u32 *sticky_bit_chk,
	int *left_edge, int *right_edge, const u32 use_read_test)
{
	const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
	const u32 dqs_max = write ? IO_IO_OUT1_DELAY_MAX : IO_DQS_IN_DELAY_MAX;
	const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
				    RW_MGR_MEM_DQ_PER_READ_DQS;
	u32 stop, bit_chk;
	int i, d;

	for (d = 0; d <= dqs_max; d++) {
		if (write)
			scc_mgr_apply_group_dq_out1_delay(d);
		else
			scc_mgr_apply_group_dq_in_delay(test_bgn, d);

		writel(0, &sdr_scc_mgr->update);

		stop = search_stop_check(write, d, rank_bgn, write_group,
					 read_group, &bit_chk, sticky_bit_chk,
					 use_read_test);
		if (stop == 1)
			break;

		/* stop != 1 */
		for (i = 0; i < per_dqs; i++) {
			if (bit_chk & 1) {
				/*
				 * Remember a passing test as
				 * the left_edge.
				 */
				left_edge[i] = d;
			} else {
				/*
				 * If a left edge has not been seen
				 * yet, then a future passing test
				 * will mark this edge as the right
				 * edge.
				 */
				if (left_edge[i] == delay_max + 1)
					right_edge[i] = -(d + 1);
			}
			bit_chk >>= 1;
		}
	}

	/* Reset DQ delay chains to 0 */
	if (write)
		scc_mgr_apply_group_dq_out1_delay(0);
	else
		scc_mgr_apply_group_dq_in_delay(test_bgn, 0);

	*sticky_bit_chk = 0;
	for (i = per_dqs - 1; i >= 0; i--) {
		debug_cond(DLEVEL == 2,
			   "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
			   __func__, __LINE__, i, left_edge[i],
			   i, right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == delay_max + 1) &&
		    (right_edge[i] != delay_max + 1)) {
			right_edge[i] = delay_max + 1;
			debug_cond(DLEVEL == 2,
				   "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
				   __func__, __LINE__, i, right_edge[i]);
		}

		/*
		 * Reset sticky bit,
		 * READ: except for bits where we have seen both
		 *       the left and right edge.
		 * WRITE: except for bits where we have seen the
		 *        left edge.
		 */
		*sticky_bit_chk <<= 1;
		if (write) {
			if (left_edge[i] != delay_max + 1)
				*sticky_bit_chk |= 1;
		} else {
			if ((left_edge[i] != delay_max + 1) &&
			    (right_edge[i] != delay_max + 1))
				*sticky_bit_chk |= 1;
		}
	}
}

2141 * search_right_edge() - Find right edge of DQ/DQS working phase
2142 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2143 * @rank_bgn: Rank number
2144 * @write_group: Write Group
2145 * @read_group: Read Group
2146 * @start_dqs: DQS start phase
2147 * @start_dqs_en: DQS enable start phase
2148 * @sticky_bit_chk: Resulting sticky bit mask after the test
2149 * @left_edge: Left edge of the DQ/DQS phase
2150 * @right_edge: Right edge of the DQ/DQS phase
2151 * @use_read_test: Perform read test
2153 * Find right edge of DQ/DQS working phase.
2155 static int search_right_edge(const int write, const int rank_bgn,
2156 const u32 write_group, const u32 read_group,
2157 const int start_dqs, const int start_dqs_en,
2158 u32 *sticky_bit_chk,
2159 int *left_edge, int *right_edge, const u32 use_read_test)
2161 const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
2162 const u32 dqs_max = write ? IO_IO_OUT1_DELAY_MAX : IO_DQS_IN_DELAY_MAX;
2163 const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
2164 RW_MGR_MEM_DQ_PER_READ_DQS;
2168 for (d = 0; d <= dqs_max - start_dqs; d++) {
2169 if (write) { /* WRITE-ONLY */
2170 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2172 } else { /* READ-ONLY */
2173 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
2174 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2175 uint32_t delay = d + start_dqs_en;
2176 if (delay > IO_DQS_EN_DELAY_MAX)
2177 delay = IO_DQS_EN_DELAY_MAX;
2178 scc_mgr_set_dqs_en_delay(read_group, delay);
2180 scc_mgr_load_dqs(read_group);
2183 writel(0, &sdr_scc_mgr->update);
2185 stop = search_stop_check(write, d, rank_bgn, write_group,
2186 read_group, &bit_chk, sticky_bit_chk,
2189 if (write && (d == 0)) { /* WRITE-ONLY */
2190 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2192 * d = 0 failed, but it passed when
2193 * testing the left edge, so it must be
2194 * marginal, set it to -1
2196 if (right_edge[i] == delay_max + 1 &&
2197 left_edge[i] != delay_max + 1)
2205 for (i = 0; i < per_dqs; i++) {
2208 * Remember a passing test as the right_edge.
2215 * If a right edge has not
2216 * been seen yet, then a future
2217 * passing test will mark this
2218 * edge as the left edge.
2220 if (right_edge[i] == delay_max + 1)
2221 left_edge[i] = -(d + 1);
2224 * d = 0 failed, but it passed
2225 * when testing the left edge,
2226 * so it must be marginal; set it to -1.
2229 if (right_edge[i] == delay_max + 1 &&
2230 left_edge[i] != delay_max + 1)
2233 * If a right edge has not been
2234 * seen yet, then a future
2235 * passing test will mark this
2236 * edge as the left edge.
2238 else if (right_edge[i] == delay_max + 1)
2239 left_edge[i] = -(d + 1);
2243 debug_cond(DLEVEL == 2, "%s:%d center[r,d=%u]: ",
2244 __func__, __LINE__, d);
2245 debug_cond(DLEVEL == 2,
2246 "bit_chk_test=%i left_edge[%u]: %d ",
2247 bit_chk & 1, i, left_edge[i]);
2248 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2254 /* Check that all bits have a window */
2255 for (i = 0; i < per_dqs; i++) {
2256 debug_cond(DLEVEL == 2,
2257 "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d",
2258 __func__, __LINE__, i, left_edge[i],
2260 if ((left_edge[i] == dqs_max + 1) ||
2261 (right_edge[i] == dqs_max + 1))
2262 return i + 1; /* FIXME: If we fail, retval > 0 */
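/*
 * Sketch (not part of the original code): a non-zero return encodes
 * the first bit without a complete window, offset by one so that 0
 * can mean success. A caller may unpack it as:
 *
 *   ret = search_right_edge(...);
 *   if (ret)
 *       failing_bit = ret - 1;   (the bit missing an edge)
 *
 * which matches how the write-calibration path reports
 * test_bgn + ret - 1 as the failing group further below.
 */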
2269 * get_window_mid_index() - Find the best middle setting of DQ/DQS phase
2270 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2271 * @left_edge: Left edge of the DQ/DQS phase
2272 * @right_edge: Right edge of the DQ/DQS phase
2273 * @mid_min: Best DQ/DQS phase middle setting
2275 * Find index and value of the middle of the DQ/DQS working phase.
2277 static int get_window_mid_index(const int write, int *left_edge,
2278 int *right_edge, int *mid_min)
2280 const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
2281 RW_MGR_MEM_DQ_PER_READ_DQS;
2282 int i, mid, min_index;
2284 /* Find middle of window for each DQ bit */
2285 *mid_min = left_edge[0] - right_edge[0];
2287 for (i = 1; i < per_dqs; i++) {
2288 mid = left_edge[i] - right_edge[i];
2289 if (mid < *mid_min) {
2296 * -mid_min/2 represents the amount that we need to move DQS.
2297 * If mid_min is odd and positive we'll need to add one to make
2298 * sure the rounding in further calculations is correct (always
2299 * bias to the right), so just add 1 for all positive values.
2303 *mid_min = *mid_min / 2;
2305 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: *mid_min=%d (index=%u)\n",
2306 __func__, __LINE__, *mid_min, min_index);
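/*
 * Worked example (values assumed): with two bits whose windows are
 * left_edge = {12, 10} and right_edge = {4, 7}, mid = left - right
 * gives {8, 3}; the smallest is 3 at index 1, so min_index = 1.
 * Per the bias rule above, the odd positive 3 is incremented to 4
 * before halving: *mid_min = 4 / 2 = 2, and the caller later
 * computes new_dqs = start_dqs - 2.
 */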
2311 * center_dq_windows() - Center the DQ/DQS windows
2312 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2313 * @left_edge: Left edge of the DQ/DQS phase
2314 * @right_edge: Right edge of the DQ/DQS phase
2315 * @mid_min: Adjusted DQ/DQS phase middle setting
2316 * @orig_mid_min: Original DQ/DQS phase middle setting
2317 * @min_index: DQ/DQS phase middle setting index
2318 * @test_bgn: Rank number to begin the test
2319 * @dq_margin: Amount of shift for the DQ
2320 * @dqs_margin: Amount of shift for the DQS
2322 * Align the DQ/DQS windows in each group.
2324 static void center_dq_windows(const int write, int *left_edge, int *right_edge,
2325 const int mid_min, const int orig_mid_min,
2326 const int min_index, const int test_bgn,
2327 int *dq_margin, int *dqs_margin)
2329 const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
2330 const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
2331 RW_MGR_MEM_DQ_PER_READ_DQS;
2332 const u32 delay_off = write ? SCC_MGR_IO_OUT1_DELAY_OFFSET :
2333 SCC_MGR_IO_IN_DELAY_OFFSET;
2334 const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off;
2336 u32 temp_dq_io_delay1, temp_dq_io_delay2;
2339 /* Initialize data for export structures */
2340 *dqs_margin = delay_max + 1;
2341 *dq_margin = delay_max + 1;
2343 /* add delay to bring centre of all DQ windows to the same "level" */
2344 for (i = 0, p = test_bgn; i < per_dqs; i++, p++) {
2345 /* Use values before dividing by 2 to reduce round-off error. */
2346 shift_dq = (left_edge[i] - right_edge[i] -
2347 (left_edge[min_index] - right_edge[min_index]))/2 +
2348 (orig_mid_min - mid_min);
2350 debug_cond(DLEVEL == 2,
2351 "vfifo_center: before: shift_dq[%u]=%d\n",
2354 temp_dq_io_delay1 = readl(addr + (p << 2));
2355 temp_dq_io_delay2 = readl(addr + (i << 2));
2357 if (shift_dq + temp_dq_io_delay1 > delay_max)
2358 shift_dq = delay_max - temp_dq_io_delay2;
2359 else if (shift_dq + temp_dq_io_delay1 < 0)
2360 shift_dq = -temp_dq_io_delay1;
2362 debug_cond(DLEVEL == 2,
2363 "vfifo_center: after: shift_dq[%u]=%d\n",
2367 scc_mgr_set_dq_out1_delay(i, temp_dq_io_delay1 + shift_dq);
2369 scc_mgr_set_dq_in_delay(p, temp_dq_io_delay1 + shift_dq);
2373 debug_cond(DLEVEL == 2,
2374 "vfifo_center: margin[%u]=[%d,%d]\n", i,
2375 left_edge[i] - shift_dq + (-mid_min),
2376 right_edge[i] + shift_dq - (-mid_min));
2378 /* To determine values for export structures */
2379 if (left_edge[i] - shift_dq + (-mid_min) < *dq_margin)
2380 *dq_margin = left_edge[i] - shift_dq + (-mid_min);
2382 if (right_edge[i] + shift_dq - (-mid_min) < *dqs_margin)
2383 *dqs_margin = right_edge[i] + shift_dq - (-mid_min);
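/*
 * Worked example (values assumed): with delay_max = 31, a bit whose
 * computed shift_dq is +7 while its current delay-chain setting
 * reads back as 28 would overflow the chain (28 + 7 > 31), so
 * shift_dq is clamped down and the final setting saturates at 31;
 * likewise shift_dq = -5 on a current setting of 2 is clamped to
 * -2 so the chain never goes below 0.
 */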
2389 * rw_mgr_mem_calibrate_vfifo_center() - Per-bit deskew DQ and centering
2390 * @rank_bgn: Rank number
2391 * @rw_group: Read/Write Group
2392 * @test_bgn: Rank at which the test begins
2393 * @use_read_test: Perform a read test
2394 * @update_fom: Update FOM
2396 * Per-bit deskew DQ and centering.
2398 static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
2399 const u32 rw_group, const u32 test_bgn,
2400 const int use_read_test, const int update_fom)
2403 SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET +
2406 * Store these as signed since there are comparisons with signed numbers.
2409 uint32_t sticky_bit_chk;
2410 int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
2411 int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
2412 int32_t orig_mid_min, mid_min;
2413 int32_t new_dqs, start_dqs, start_dqs_en, final_dqs_en;
2414 int32_t dq_margin, dqs_margin;
2418 debug("%s:%d: %u %u", __func__, __LINE__, rw_group, test_bgn);
2420 start_dqs = readl(addr);
2421 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
2422 start_dqs_en = readl(addr - IO_DQS_EN_DELAY_OFFSET);
2424 /* set the left and right edge of each bit to an illegal value */
2425 /* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
2427 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
2428 left_edge[i] = IO_IO_IN_DELAY_MAX + 1;
2429 right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
2432 /* Search for the left edge of the window for each bit */
2433 search_left_edge(0, rank_bgn, rw_group, rw_group, test_bgn,
2435 left_edge, right_edge, use_read_test);
2438 /* Search for the right edge of the window for each bit */
2439 ret = search_right_edge(0, rank_bgn, rw_group, rw_group,
2440 start_dqs, start_dqs_en,
2442 left_edge, right_edge, use_read_test);
2445 * Restore delay chain settings before letting the loop
2446 * in rw_mgr_mem_calibrate_vfifo retry different
2447 * dqs/ck relationships.
2449 scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs);
2450 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
2451 scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en);
2453 scc_mgr_load_dqs(rw_group);
2454 writel(0, &sdr_scc_mgr->update);
2456 debug_cond(DLEVEL == 1,
2457 "%s:%d vfifo_center: failed to find edge [%u]: %d %d",
2458 __func__, __LINE__, i, left_edge[i], right_edge[i]);
2459 if (use_read_test) {
2460 set_failing_group_stage(rw_group *
2461 RW_MGR_MEM_DQ_PER_READ_DQS + i,
2463 CAL_SUBSTAGE_VFIFO_CENTER);
2465 set_failing_group_stage(rw_group *
2466 RW_MGR_MEM_DQ_PER_READ_DQS + i,
2467 CAL_STAGE_VFIFO_AFTER_WRITES,
2468 CAL_SUBSTAGE_VFIFO_CENTER);
2473 min_index = get_window_mid_index(0, left_edge, right_edge, &mid_min);
2475 /* Determine the amount we can change DQS (which is -mid_min) */
2476 orig_mid_min = mid_min;
2477 new_dqs = start_dqs - mid_min;
2478 if (new_dqs > IO_DQS_IN_DELAY_MAX)
2479 new_dqs = IO_DQS_IN_DELAY_MAX;
2480 else if (new_dqs < 0)
2483 mid_min = start_dqs - new_dqs;
2484 debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
2487 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2488 if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
2489 mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
2490 else if (start_dqs_en - mid_min < 0)
2491 mid_min += start_dqs_en - mid_min;
2493 new_dqs = start_dqs - mid_min;
2495 debug_cond(DLEVEL == 1,
2496 "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
2498 IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
2501 /* Add delay to bring centre of all DQ windows to the same "level". */
2502 center_dq_windows(0, left_edge, right_edge, mid_min, orig_mid_min,
2503 min_index, test_bgn, &dq_margin, &dqs_margin);
2506 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2507 final_dqs_en = start_dqs_en - mid_min;
2508 scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en);
2509 scc_mgr_load_dqs(rw_group);
2513 scc_mgr_set_dqs_bus_in_delay(rw_group, new_dqs);
2514 scc_mgr_load_dqs(rw_group);
2515 debug_cond(DLEVEL == 2,
2516 "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d",
2517 __func__, __LINE__, dq_margin, dqs_margin);
2520 * Do not remove this line as it makes sure all of our decisions
2521 * have been applied. Apply the update bit.
2523 writel(0, &sdr_scc_mgr->update);
2525 if ((dq_margin < 0) || (dqs_margin < 0))
2532 * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device
2533 * @rw_group: Read/Write Group
2534 * @phase: DQ/DQS phase
2536 * Because initially no communication can be reliably performed with the memory
2537 * device, the sequencer uses a guaranteed write mechanism to write data into
2538 * the memory device.
2540 static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group,
2545 /* Set a particular DQ/DQS phase. */
2546 scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase);
2548 debug_cond(DLEVEL == 1, "%s:%d guaranteed write: g=%u p=%u\n",
2549 __func__, __LINE__, rw_group, phase);
2552 * Altera EMI_RM 2015.05.04 :: Figure 1-25
2553 * Load up the patterns used by read calibration using the
2554 * current DQDQS phase.
2556 rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2558 if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
2562 * Altera EMI_RM 2015.05.04 :: Figure 1-26
2563 * Back-to-Back reads of the patterns used for calibration.
2565 ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1);
2567 debug_cond(DLEVEL == 1,
2568 "%s:%d Guaranteed read test failed: g=%u p=%u\n",
2569 __func__, __LINE__, rw_group, phase);
2574 * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
2575 * @rw_group: Read/Write Group
2576 * @test_bgn: Rank at which the test begins
2578 * DQS enable calibration ensures reliable capture of the DQ signal without
2579 * glitches on the DQS line.
2581 static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
2585 * Altera EMI_RM 2015.05.04 :: Figure 1-27
2586 * DQS and DQS Enable Signal Relationships.
2589 /* We start at zero, so we have one less dq to divide among. */
2590 const u32 delay_step = IO_IO_IN_DELAY_MAX /
2591 (RW_MGR_MEM_DQ_PER_READ_DQS - 1);
2595 debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);
2597 /* Try different dq_in_delays since the DQ path is shorter than DQS. */
2598 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
2599 r += NUM_RANKS_PER_SHADOW_REG) {
2600 for (i = 0, p = test_bgn, d = 0;
2601 i < RW_MGR_MEM_DQ_PER_READ_DQS;
2602 i++, p++, d += delay_step) {
2603 debug_cond(DLEVEL == 1,
2604 "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
2605 __func__, __LINE__, rw_group, r, i, p, d);
2607 scc_mgr_set_dq_in_delay(p, d);
2611 writel(0, &sdr_scc_mgr->update);
2615 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
2616 * dq_in_delay values
2618 ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(rw_group);
2620 debug_cond(DLEVEL == 1,
2621 "%s:%d: g=%u found=%u; Reseting delay chain to zero\n",
2622 __func__, __LINE__, rw_group, !ret);
2624 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
2625 r += NUM_RANKS_PER_SHADOW_REG) {
2626 scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
2627 writel(0, &sdr_scc_mgr->update);
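/*
 * Worked example (typical values, assumed for illustration): with
 * IO_IO_IN_DELAY_MAX = 31 and RW_MGR_MEM_DQ_PER_READ_DQS = 8,
 * delay_step = 31 / 7 = 4, so the staggered-delay loop at the top
 * of this function programs the DQ pins of the group with input
 * delays 0, 4, 8, ..., 28. Spreading the DQ bits across the delay
 * range makes it likely that some of them land in the window while
 * the DQS enable phase is being searched.
 */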
2634 * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
2635 * @rw_group: Read/Write Group
2636 * @test_bgn: Rank at which the test begins
2637 * @use_read_test: Perform a read test
2638 * @update_fom: Update FOM
2640 * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads and writes.
2644 rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
2645 const int use_read_test,
2646 const int update_fom)
2649 int ret, grp_calibrated;
2653 * Altera EMI_RM 2015.05.04 :: Figure 1-28
2654 * Read per-bit deskew can be done on a per shadow register basis.
2657 for (rank_bgn = 0, sr = 0;
2658 rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2659 rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
2660 /* Check if this set of ranks should be skipped entirely. */
2661 if (param->skip_shadow_regs[sr])
2664 ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group,
2674 if (!grp_calibrated)
2681 * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
2682 * @rw_group: Read/Write Group
2683 * @test_bgn: Rank at which the test begins
2685 * Stage 1: Calibrate the read valid prediction FIFO.
2687 * This function implements UniPHY calibration Stage 1, as explained in
2688 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
2690 * - read valid prediction will consist of finding:
2691 * - DQS enable phase and DQS enable delay (DQS Enable Calibration)
2692 * - DQS input phase and DQS input delay (DQ/DQS Centering)
2693 * - we also do a per-bit deskew on the DQ lines.
2695 static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
2698 uint32_t dtaps_per_ptap;
2699 uint32_t failed_substage;
2703 debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
2705 /* Update info for sims */
2706 reg_file_set_group(rw_group);
2707 reg_file_set_stage(CAL_STAGE_VFIFO);
2708 reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
2710 failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
2712 /* USER Determine number of delay taps for each phase tap. */
2713 dtaps_per_ptap = DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP,
2714 IO_DELAY_PER_DQS_EN_DCHAIN_TAP) - 1;
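/*
 * Worked example (tap delays assumed for illustration): with
 * IO_DELAY_PER_OPA_TAP = 416 ps and
 * IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 25 ps,
 * DIV_ROUND_UP(416, 25) - 1 = 17 - 1 = 16, so the loop below
 * sweeps d = 0, 2, 4, ..., 16 delay taps per phase tap.
 */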
2716 for (d = 0; d <= dtaps_per_ptap; d += 2) {
2718 * In RLDRAMX we may be messing up the delay of pins in
2719 * the same write rw_group but outside of the current read
2720 * rw_group; that's OK because we haven't calibrated the output side yet.
2724 scc_mgr_apply_group_all_out_delay_add_all_ranks(
2728 for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++) {
2729 /* 1) Guaranteed Write */
2730 ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p);
2734 /* 2) DQS Enable Calibration */
2735 ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group,
2738 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
2742 /* 3) Centering DQ/DQS */
2744 * If doing read-after-write calibration, do not update
2745 * the FOM now; do it then.
2747 ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group,
2750 failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
2759 /* Calibration Stage 1 failed. */
2760 set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage);
2763 /* Calibration Stage 1 completed OK. */
2766 * Reset the delay chains back to zero if they have moved > 1
2767 * (check for > 1 because the loop increases d even on a pass in the last case).
2771 scc_mgr_zero_group(rw_group, 1);
2777 * rw_mgr_mem_calibrate_vfifo_end() - DQ/DQS Centering.
2778 * @rw_group: Read/Write Group
2779 * @test_bgn: Rank at which the test begins
2781 * Stage 3: DQ/DQS Centering.
2783 * This function implements UniPHY calibration Stage 3, as explained in
2784 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
2786 static int rw_mgr_mem_calibrate_vfifo_end(const u32 rw_group,
2791 debug("%s:%d %u %u", __func__, __LINE__, rw_group, test_bgn);
2793 /* Update info for sims. */
2794 reg_file_set_group(rw_group);
2795 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
2796 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
2798 ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group, test_bgn, 0, 1);
2800 set_failing_group_stage(rw_group,
2801 CAL_STAGE_VFIFO_AFTER_WRITES,
2802 CAL_SUBSTAGE_VFIFO_CENTER);
2807 * rw_mgr_mem_calibrate_lfifo() - Minimize latency
2809 * Stage 4: Minimize latency.
2811 * This function implements UniPHY calibration Stage 4, as explained in
2812 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
2813 * Calibrate LFIFO to find smallest read latency.
2815 static uint32_t rw_mgr_mem_calibrate_lfifo(void)
2819 debug("%s:%d\n", __func__, __LINE__);
2821 /* Update info for sims. */
2822 reg_file_set_stage(CAL_STAGE_LFIFO);
2823 reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
2825 /* Load up the patterns used by read calibration for all ranks */
2826 rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2829 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2830 debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
2831 __func__, __LINE__, gbl->curr_read_lat);
2833 if (!rw_mgr_mem_calibrate_read_test_all_ranks(0, NUM_READ_TESTS,
2839 * Reduce read latency and see if things are
2840 * working correctly.
2842 gbl->curr_read_lat--;
2843 } while (gbl->curr_read_lat > 0);
2845 /* Reset the fifos to get pointers to known state. */
2846 writel(0, &phy_mgr_cmd->fifo_reset);
2849 /* Add a fudge factor to the read latency that was determined */
2850 gbl->curr_read_lat += 2;
2851 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2852 debug_cond(DLEVEL == 2,
2853 "%s:%d lfifo: success: using read_lat=%u\n",
2854 __func__, __LINE__, gbl->curr_read_lat);
2856 set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
2857 CAL_SUBSTAGE_READ_LATENCY);
2859 debug_cond(DLEVEL == 2,
2860 "%s:%d lfifo: failed at initial read_lat=%u\n",
2861 __func__, __LINE__, gbl->curr_read_lat);
2868 * search_window() - Search for (part of) the window with DM/DQS shift
2869 * @search_dm: If 1, search for the DM shift, if 0, search for DQS shift
2870 * @rank_bgn: Rank number
2871 * @write_group: Write Group
2872 * @bgn_curr: Current window begin
2873 * @end_curr: Current window end
2874 * @bgn_best: Current best window begin
2875 * @end_best: Current best window end
2876 * @win_best: Size of the best window
2877 * @new_dqs: New DQS value (only applicable if search_dm = 0).
2879 * Search for (part of) the window with DM/DQS shift.
2881 static void search_window(const int search_dm,
2882 const u32 rank_bgn, const u32 write_group,
2883 int *bgn_curr, int *end_curr, int *bgn_best,
2884 int *end_best, int *win_best, int new_dqs)
2887 const int max = IO_IO_OUT1_DELAY_MAX - new_dqs;
2890 /* Search for (part of) the window with DM/DQS shift. */
2891 for (di = max; di >= 0; di -= DELTA_D) {
2894 scc_mgr_apply_group_dm_out1_delay(d);
2896 /* For DQS, we go from 0...max */
2899 * Note: this only shifts DQS, so we may be limiting ourselves to
2900 * the width of DQ unnecessarily.
2902 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2906 writel(0, &sdr_scc_mgr->update);
2908 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
2909 PASS_ALL_BITS, &bit_chk,
2911 /* Set current end of the window. */
2912 *end_curr = search_dm ? -d : d;
2915 * If a starting edge of our window has not been seen,
2916 * this is our current start of the DM window.
2918 if (*bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
2919 *bgn_curr = search_dm ? -d : d;
2922 * If the current window is bigger than the best seen
2923 * so far, record it as the new best.
2925 if ((*end_curr - *bgn_curr + 1) > *win_best) {
2926 *win_best = *end_curr - *bgn_curr + 1;
2927 *bgn_best = *bgn_curr;
2928 *end_best = *end_curr;
2931 /* We just saw a failing test. Reset temp edge. */
2932 *bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2933 *end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2935 /* Early exit is only applicable to DQS. */
2940 * Early exit optimization: if the remaining delay
2941 * chain space is less than the largest window
2942 * already seen, we can exit.
2944 if (*win_best - 1 > IO_IO_OUT1_DELAY_MAX - new_dqs - d)
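/*
 * Illustration (values assumed): scanning the DM shift with
 * DELTA_D = 1, suppose the write test passes for d = 9 down to
 * d = 4 and fails elsewhere. The first pass sets
 * bgn_curr = end_curr = -9; each further pass advances end_curr
 * and updates the best window, so after d = 4 we have
 * bgn_best = -9, end_best = -4, win_best = -4 - (-9) + 1 = 6.
 * The failure at d = 3 merely resets the temporary edges.
 */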
2951 * rw_mgr_mem_calibrate_writes_center() - Center all windows
2952 * @rank_bgn: Rank number
2953 * @write_group: Write group
2954 * @test_bgn: Rank at which the test begins
2956 * Center all windows. Do per-bit deskew to possibly increase the size of certain windows.
2960 rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group,
2966 int left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2967 int right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2969 int mid_min, orig_mid_min;
2970 int new_dqs, start_dqs;
2971 int dq_margin, dqs_margin, dm_margin;
2972 int bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2973 int end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2974 int bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
2975 int end_best = IO_IO_OUT1_DELAY_MAX + 1;
2980 debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
2984 start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS |
2985 SCC_MGR_IO_OUT1_DELAY_OFFSET) +
2986 (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
2988 /* Per-bit deskew. */
2991 * Set the left and right edge of each bit to an illegal value.
2992 * Use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
2995 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2996 left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2997 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
3000 /* Search for the left edge of the window for each bit. */
3001 search_left_edge(1, rank_bgn, write_group, 0, test_bgn,
3003 left_edge, right_edge, 0);
3005 /* Search for the right edge of the window for each bit. */
3006 ret = search_right_edge(1, rank_bgn, write_group, 0,
3009 left_edge, right_edge, 0);
3011 set_failing_group_stage(test_bgn + ret - 1, CAL_STAGE_WRITES,
3012 CAL_SUBSTAGE_WRITES_CENTER);
3016 min_index = get_window_mid_index(1, left_edge, right_edge, &mid_min);
3018 /* Determine the amount we can change DQS (which is -mid_min). */
3019 orig_mid_min = mid_min;
3020 new_dqs = start_dqs;
3022 debug_cond(DLEVEL == 1,
3023 "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
3024 __func__, __LINE__, start_dqs, new_dqs, mid_min);
3026 /* Add delay to bring centre of all DQ windows to the same "level". */
3027 center_dq_windows(1, left_edge, right_edge, mid_min, orig_mid_min,
3028 min_index, 0, &dq_margin, &dqs_margin);
3031 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
3032 writel(0, &sdr_scc_mgr->update);
3035 debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);
3038 * Set the left and right edge of each bit to an illegal value.
3039 * Use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
3041 left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
3042 right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
3044 /* Search for (part of) the window with DM shift. */
3045 search_window(1, rank_bgn, write_group, &bgn_curr, &end_curr,
3046 &bgn_best, &end_best, &win_best, 0);
3048 /* Reset DM delay chains to 0. */
3049 scc_mgr_apply_group_dm_out1_delay(0);
3052 * Check to see if the current window nudges up against 0 delay.
3053 * If so, we need to continue the search by shifting DQS;
3054 * otherwise the DQS search begins as a new search.
3056 if (end_curr != 0) {
3057 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3058 end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3061 /* Search for (part of) the window with DQS shifts. */
3062 search_window(0, rank_bgn, write_group, &bgn_curr, &end_curr,
3063 &bgn_best, &end_best, &win_best, new_dqs);
3065 /* Assign left and right edge for cal and reporting. */
3066 left_edge[0] = -1 * bgn_best;
3067 right_edge[0] = end_best;
3069 debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n",
3070 __func__, __LINE__, left_edge[0], right_edge[0]);
3072 /* Move DQS (back to orig). */
3073 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
3077 /* Find middle of window for the DM bit. */
3078 mid = (left_edge[0] - right_edge[0]) / 2;
3080 /* Only move right, since we are not moving DQS/DQ. */
3084 /* dm_margin should fail if we never find a window. */
3088 dm_margin = left_edge[0] - mid;
3090 scc_mgr_apply_group_dm_out1_delay(mid);
3091 writel(0, &sdr_scc_mgr->update);
3093 debug_cond(DLEVEL == 2,
3094 "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
3095 __func__, __LINE__, left_edge[0], right_edge[0],
3097 /* Export values. */
3098 gbl->fom_out += dq_margin + dqs_margin;
3100 debug_cond(DLEVEL == 2,
3101 "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
3102 __func__, __LINE__, dq_margin, dqs_margin, dm_margin);
3105 * Do not remove this line as it makes sure all of our
3106 * decisions have been applied.
3108 writel(0, &sdr_scc_mgr->update);
3110 if ((dq_margin < 0) || (dqs_margin < 0) || (dm_margin < 0))
3117 * rw_mgr_mem_calibrate_writes() - Write Calibration Part One
3118 * @rank_bgn: Rank number
3119 * @group: Read/Write Group
3120 * @test_bgn: Rank at which the test begins
3122 * Stage 2: Write Calibration Part One.
3124 * This function implements UniPHY calibration Stage 2, as explained in
3125 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
3127 static int rw_mgr_mem_calibrate_writes(const u32 rank_bgn, const u32 group,
3132 /* Update info for sims */
3133 debug("%s:%d %u %u\n", __func__, __LINE__, group, test_bgn);
3135 reg_file_set_group(group);
3136 reg_file_set_stage(CAL_STAGE_WRITES);
3137 reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
3139 ret = rw_mgr_mem_calibrate_writes_center(rank_bgn, group, test_bgn);
3141 set_failing_group_stage(group, CAL_STAGE_WRITES,
3142 CAL_SUBSTAGE_WRITES_CENTER);
3148 * mem_precharge_and_activate() - Precharge all banks and activate
3150 * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
3152 static void mem_precharge_and_activate(void)
3156 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
3157 /* Test if the rank should be skipped. */
3158 if (param->skip_ranks[r])
3162 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
3164 /* Precharge all banks. */
3165 writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3166 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3168 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
3169 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
3170 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
3172 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
3173 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
3174 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
3176 /* Activate rows. */
3177 writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3178 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3183 * mem_init_latency() - Configure memory RLAT and WLAT settings
3185 * Configure memory RLAT and WLAT parameters.
3187 static void mem_init_latency(void)
3190 * For AV/CV, LFIFO is hardened and always runs at full rate
3191 * so max latency in AFI clocks, used here, is correspondingly smaller.
3194 const u32 max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
3197 debug("%s:%d\n", __func__, __LINE__);
3200 * Read in write latency.
3201 * WL for Hard PHY does not include additive latency.
3203 wlat = readl(&data_mgr->t_wl_add);
3204 wlat += readl(&data_mgr->mem_t_add);
3206 gbl->rw_wl_nop_cycles = wlat - 1;
3208 /* Read in read latency. */
3209 rlat = readl(&data_mgr->t_rl_add);
3211 /* Set a pretty high read latency initially. */
3212 gbl->curr_read_lat = rlat + 16;
3213 if (gbl->curr_read_lat > max_latency)
3214 gbl->curr_read_lat = max_latency;
3216 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3218 /* Advertise write latency. */
3219 writel(wlat, &phy_mgr_cfg->afi_wlat);
3223 * @mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
3225 * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
3227 static void mem_skip_calibrate(void)
3229 uint32_t vfifo_offset;
3232 debug("%s:%d\n", __func__, __LINE__);
3233 /* Need to update every shadow register set used by the interface */
3234 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
3235 r += NUM_RANKS_PER_SHADOW_REG) {
3237 * Set output phase alignment settings appropriate for skip calibration.
3240 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3241 scc_mgr_set_dqs_en_phase(i, 0);
3242 #if IO_DLL_CHAIN_LENGTH == 6
3243 scc_mgr_set_dqdqs_output_phase(i, 6);
3245 scc_mgr_set_dqdqs_output_phase(i, 7);
3250 * Write data arrives to the I/O two cycles before write
3251 * latency is reached (720 deg).
3252 * -> due to bit-slip in a/c bus
3253 * -> to allow board skew where dqs is longer than ck
3254 * -> how often can this happen!?
3255 * -> can claim back some ptaps for high freq
3256 * support if we can relax this, but I digress...
3258 * The write_clk leads mem_ck by 90 deg
3259 * The minimum ptap of the OPA is 180 deg
3260 * Each ptap has (360 / IO_DLL_CHAIN_LENGH) deg of delay
3261 * The write_clk is always delayed by 2 ptaps
3263 * Hence, to make DQS aligned to CK, we need to delay
3265 * (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
3267 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
3268 * gives us the number of ptaps, which simplifies to:
3270 * (1.25 * IO_DLL_CHAIN_LENGTH - 2)
3272 scc_mgr_set_dqdqs_output_phase(i,
3273 1.25 * IO_DLL_CHAIN_LENGTH - 2);
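/*
 * Worked check of the formula above: for IO_DLL_CHAIN_LENGTH = 8,
 * one ptap is 360 / 8 = 45 deg, so
 * (720 - 90 - 180 - 2 * 45) / 45 = 360 / 45 = 8 ptaps,
 * which indeed equals 1.25 * 8 - 2 = 8.
 */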
3275 writel(0xff, &sdr_scc_mgr->dqs_ena);
3276 writel(0xff, &sdr_scc_mgr->dqs_io_ena);
3278 for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
3279 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3280 SCC_MGR_GROUP_COUNTER_OFFSET);
3282 writel(0xff, &sdr_scc_mgr->dq_ena);
3283 writel(0xff, &sdr_scc_mgr->dm_ena);
3284 writel(0, &sdr_scc_mgr->update);
3287 /* Compensate for simulation model behaviour */
3288 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3289 scc_mgr_set_dqs_bus_in_delay(i, 10);
3290 scc_mgr_load_dqs(i);
3292 writel(0, &sdr_scc_mgr->update);
3295 * ArriaV has hard FIFOs that can only be initialized by incrementing in the sequencer.
3298 vfifo_offset = CALIB_VFIFO_OFFSET;
3299 for (j = 0; j < vfifo_offset; j++)
3300 writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
3301 writel(0, &phy_mgr_cmd->fifo_reset);
3304 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
3305 * setting from a generation-time constant.
3307 gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
3308 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3312 * mem_calibrate() - Memory calibration entry point.
3314 * Perform memory calibration.
3316 static uint32_t mem_calibrate(void)
3319 uint32_t rank_bgn, sr;
3320 uint32_t write_group, write_test_bgn;
3321 uint32_t read_group, read_test_bgn;
3322 uint32_t run_groups, current_run;
3323 uint32_t failing_groups = 0;
3324 uint32_t group_failed = 0;
3326 const u32 rwdqs_ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
3327 RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
3329 debug("%s:%d\n", __func__, __LINE__);
3331 /* Initialize the data settings */
3332 gbl->error_substage = CAL_SUBSTAGE_NIL;
3333 gbl->error_stage = CAL_STAGE_NIL;
3334 gbl->error_group = 0xff;
3338 /* Initialize WLAT and RLAT. */
3341 /* Initialize bit slips. */
3342 mem_precharge_and_activate();
3344 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3345 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3346 SCC_MGR_GROUP_COUNTER_OFFSET);
3347 /* Only needed once to set all groups, pins, DQ, DQS, DM. */
3349 scc_mgr_set_hhp_extras();
3351 scc_set_bypass_mode(i);
3354 /* Calibration is skipped. */
3355 if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
3357 * Set VFIFO and LFIFO to instant-on settings in skip
3360 mem_skip_calibrate();
3363 * Do not remove this line as it makes sure all of our
3364 * decisions have been applied.
3366 writel(0, &sdr_scc_mgr->update);
3370 /* Calibration is not skipped. */
3371 for (i = 0; i < NUM_CALIB_REPEAT; i++) {
3373 * Zero all delay chain/phase settings for all
3374 * groups and all shadow register sets.
3378 run_groups = ~param->skip_groups;
3380 for (write_group = 0, write_test_bgn = 0; write_group
3381 < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
3382 write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
3384 /* Initialize the group failure */
3387 current_run = run_groups & ((1 <<
3388 RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
3389 run_groups = run_groups >>
3390 RW_MGR_NUM_DQS_PER_WRITE_GROUP;
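/*
 * Illustration (values assumed): with
 * RW_MGR_NUM_DQS_PER_WRITE_GROUP = 1 and param->skip_groups = 0x2,
 * run_groups starts as ~0x2. Each pass peels off the low bit:
 * write_group 0 sees current_run = 1 and is calibrated,
 * write_group 1 sees current_run = 0 and is skipped, and so on.
 */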
3392 if (current_run == 0)
3395 writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
3396 SCC_MGR_GROUP_COUNTER_OFFSET);
3397 scc_mgr_zero_group(write_group, 0);
3399 for (read_group = write_group * rwdqs_ratio,
3401 read_group < (write_group + 1) * rwdqs_ratio;
3403 read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
3404 if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
3407 /* Calibrate the VFIFO */
3408 if (rw_mgr_mem_calibrate_vfifo(read_group,
3412 if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3415 /* The group failed, we're done. */
3419 /* Calibrate the output side */
3420 for (rank_bgn = 0, sr = 0;
3421 rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
3422 rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
3423 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3426 /* Not needed in quick mode! */
3427 if (STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS)
3431 * Determine if this set of ranks
3432 * should be skipped entirely.
3434 if (param->skip_shadow_regs[sr])
3437 /* Calibrate WRITEs */
3438 if (!rw_mgr_mem_calibrate_writes(rank_bgn,
3439 write_group, write_test_bgn))
3443 if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3447 /* Some group failed, we're done. */
3451 for (read_group = write_group * rwdqs_ratio,
3453 read_group < (write_group + 1) * rwdqs_ratio;
3455 read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
3456 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3459 if (!rw_mgr_mem_calibrate_vfifo_end(read_group,
3463 if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3466 /* The group failed, we're done. */
3470 /* No group failed, continue as usual. */
3473 grp_failed: /* A group failed, increment the counter. */
3478 * USER If there are any failing groups then report the failure.
3481 if (failing_groups != 0)
3484 if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
3488 * If we're skipping groups as part of debug,
3489 * don't calibrate LFIFO.
3491 if (param->skip_groups != 0)
3494 /* Calibrate the LFIFO */
3495 if (!rw_mgr_mem_calibrate_lfifo())
3500 * Do not remove this line as it makes sure all of our decisions
3501 * have been applied.
3503 writel(0, &sdr_scc_mgr->update);
3508 * run_mem_calibrate() - Perform memory calibration
3510 * This function triggers the entire memory calibration procedure.
3512 static int run_mem_calibrate(void)
3516 debug("%s:%d\n", __func__, __LINE__);
3518 /* Reset pass/fail status shown on afi_cal_success/fail */
3519 writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
3521 /* Stop tracking manager. */
3522 clrbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
3524 phy_mgr_initialize();
3525 rw_mgr_mem_initialize();
3527 /* Perform the actual memory calibration. */
3528 pass = mem_calibrate();
3530 mem_precharge_and_activate();
3531 writel(0, &phy_mgr_cmd->fifo_reset);
3534 rw_mgr_mem_handoff();
3536 * In Hard PHY this is a 2-bit control:
3538 * 1: DDIO Mux Select
3540 writel(0x2, &phy_mgr_cfg->mux_sel);
3542 /* Start tracking manager. */
3543 setbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
3549 * debug_mem_calibrate() - Report result of memory calibration
3550 * @pass: Value indicating whether calibration passed or failed
3552 * This function reports the results of the memory calibration
3553 * and writes debug information into the register file.
3555 static void debug_mem_calibrate(int pass)
3557 uint32_t debug_info;
3560 printf("%s: CALIBRATION PASSED\n", __FILE__);
3565 if (gbl->fom_in > 0xff)
3568 if (gbl->fom_out > 0xff)
3569 gbl->fom_out = 0xff;
3571 /* Update the FOM in the register file */
3572 debug_info = gbl->fom_in;
3573 debug_info |= gbl->fom_out << 8;
3574 writel(debug_info, &sdr_reg_file->fom);
3576 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3577 writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
3579 printf("%s: CALIBRATION FAILED\n", __FILE__);
3581 debug_info = gbl->error_stage;
3582 debug_info |= gbl->error_substage << 8;
3583 debug_info |= gbl->error_group << 16;
3585 writel(debug_info, &sdr_reg_file->failing_stage);
3586 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3587 writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);
3589 /* Update the failing group/stage in the register file */
3590 debug_info = gbl->error_stage;
3591 debug_info |= gbl->error_substage << 8;
3592 debug_info |= gbl->error_group << 16;
3593 writel(debug_info, &sdr_reg_file->failing_stage);
3596 printf("%s: Calibration complete\n", __FILE__);
3600 * hc_initialize_rom_data() - Initialize ROM data
3602 * Initialize ROM data.
3604 static void hc_initialize_rom_data(void)
3608 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
3609 for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
3610 writel(inst_rom_init[i], addr + (i << 2));
3612 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
3613 for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
3614 writel(ac_rom_init[i], addr + (i << 2));
3618 * initialize_reg_file() - Initialize SDR register file
3620 * Initialize SDR register file.
3622 static void initialize_reg_file(void)
3624 /* Initialize the register file with the correct data */
3625 writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
3626 writel(0, &sdr_reg_file->debug_data_addr);
3627 writel(0, &sdr_reg_file->cur_stage);
3628 writel(0, &sdr_reg_file->fom);
3629 writel(0, &sdr_reg_file->failing_stage);
3630 writel(0, &sdr_reg_file->debug1);
3631 writel(0, &sdr_reg_file->debug2);
3635 * initialize_hps_phy() - Initialize HPS PHY
3637 * Initialize HPS PHY.
3639 static void initialize_hps_phy(void)
3643 * Tracking also gets configured here because it's in the same register.
3646 uint32_t trk_sample_count = 7500;
3647 uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
3649 * Format is number of outer loops in the 16 MSB, sample count in the 16 LSB.
3654 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
3655 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
3656 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
3657 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
3658 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
3659 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
3661 * This field selects the intrinsic latency to RDATA_EN/FULL path.
3662 * 00 - bypass, 01 - add 5 cycles, 10 - add 10 cycles, 11 - add 15 cycles.
3664 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
3665 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
3667 writel(reg, &sdr_ctrl->phy_ctrl0);
3670 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
3672 SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
3673 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
3674 trk_long_idle_sample_count);
3675 writel(reg, &sdr_ctrl->phy_ctrl1);
3678 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
3679 trk_long_idle_sample_count >>
3680 SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
3681 writel(reg, &sdr_ctrl->phy_ctrl2);
3685 * initialize_tracking() - Initialize tracking
3687 * Initialize the register file with usable initial data.
3689 static void initialize_tracking(void)
3692 * Initialize the register file with the correct data.
3693 * Compute usable version of value in case we skip full
3694 * computation later.
3696 writel(DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1,
3697 &sdr_reg_file->dtaps_per_ptap);
3699 /* trk_sample_count */
3700 writel(7500, &sdr_reg_file->trk_sample_count);
3702 /* longidle outer loop [15:0] */
3703 writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);
3706 * longidle sample count [31:24]
3707 * trfc, worst case of 933MHz 4Gb [23:16]
3708 * trcd, worst case [15:8]
3711 writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
3712 &sdr_reg_file->delays);
3715 writel((RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) |
3716 (RW_MGR_SGLE_READ << 8) | (RW_MGR_PRECHARGE_ALL << 0),
3717 &sdr_reg_file->trk_rw_mgr_addr);
3719 writel(RW_MGR_MEM_IF_READ_DQS_WIDTH,
3720 &sdr_reg_file->trk_read_dqs_width);
3723 writel((RW_MGR_REFRESH_ALL << 24) | (1000 << 0),
3724 &sdr_reg_file->trk_rfsh);
3727 int sdram_calibration_full(void)
3729 struct param_type my_param;
3730 struct gbl_type my_gbl;
3733 memset(&my_param, 0, sizeof(my_param));
3734 memset(&my_gbl, 0, sizeof(my_gbl));
3739 /* Set the calibration enabled by default */
3740 gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
3742 * Only sweep all groups (regardless of fail state) by default
3743 * Set enabled read test by default.
3745 #if DISABLE_GUARANTEED_READ
3746 gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
3748 /* Initialize the register file */
3749 initialize_reg_file();
3751 /* Initialize any PHY CSR */
3752 initialize_hps_phy();
3754 scc_mgr_initialize();
3756 initialize_tracking();
3758 printf("%s: Preparing to start memory calibration\n", __FILE__);
3760 debug("%s:%d\n", __func__, __LINE__);
3761 debug_cond(DLEVEL == 1,
3762 "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
3763 RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
3764 RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
3765 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
3766 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
3767 debug_cond(DLEVEL == 1,
3768 "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
3769 RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
3770 RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
3771 IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
3772 debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
3773 IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
3774 debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
3775 IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
3776 IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
3777 debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
3778 IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
3779 IO_IO_OUT2_DELAY_MAX);
3780 debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
3781 IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);
3783 hc_initialize_rom_data();
3785 /* update info for sims */
3786 reg_file_set_stage(CAL_STAGE_NIL);
3787 reg_file_set_group(0);
3790 * Load global needed for those actions that require
3791 * some dynamic calibration support.
3793 dyn_calib_steps = STATIC_CALIB_STEPS;
3795 * Load global to allow dynamic selection of delay loop settings
3796 * based on calibration mode.
3798 if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
3799 skip_delay_mask = 0xff;
3801 skip_delay_mask = 0x0;
3803 pass = run_mem_calibrate();
3804 debug_mem_calibrate(pass);