1 // SPDX-License-Identifier: GPL-2.0+
6 * Texas Instruments, <www.ti.com>
8 * Aneesh V <aneesh@ti.com>
16 #include <asm/arch/clock.h>
17 #include <asm/arch/sys_proto.h>
18 #include <asm/omap_common.h>
19 #include <asm/omap_sec_common.h>
20 #include <asm/utils.h>
21 #include <linux/compiler.h>
22 #include <asm/ti-common/ti-edma3.h>
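/*
 * -1 = not yet determined; dmm_init() sets these based on whether the
 * LISA map actually places memory on EMIF1/EMIF2.
 */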
24 static int emif1_enabled = -1, emif2_enabled = -1;
26 void set_lpmode_selfrefresh(u32 base)
28 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
31 reg = readl(&emif->emif_pwr_mgmt_ctrl);
32 reg &= ~EMIF_REG_LP_MODE_MASK;
33 reg |= LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT;
34 reg &= ~EMIF_REG_SR_TIM_MASK;
35 writel(reg, &emif->emif_pwr_mgmt_ctrl);
37 /* dummy read for the new SR_TIM to be loaded */
38 readl(&emif->emif_pwr_mgmt_ctrl);
41 void force_emif_self_refresh(void)
43 set_lpmode_selfrefresh(EMIF1_BASE);
45 set_lpmode_selfrefresh(EMIF2_BASE);
48 inline u32 emif_num(u32 base)
50 if (base == EMIF1_BASE)
52 else if (base == EMIF2_BASE)
58 static inline u32 get_mr(u32 base, u32 cs, u32 mr_addr)
61 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
63 mr_addr |= cs << EMIF_REG_CS_SHIFT;
64 writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
65 if (omap_revision() == OMAP4430_ES2_0)
66 mr = readl(&emif->emif_lpddr2_mode_reg_data_es2);
68 mr = readl(&emif->emif_lpddr2_mode_reg_data);
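/* MR values are replicated on all byte lanes; collapse to a single byte below when all four lanes agree */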
69 debug("get_mr: EMIF%d cs %d mr %08x val 0x%x\n", emif_num(base),
71 if (((mr & 0x0000ff00) >> 8) == (mr & 0xff) &&
72 ((mr & 0x00ff0000) >> 16) == (mr & 0xff) &&
73 ((mr & 0xff000000) >> 24) == (mr & 0xff))
79 static inline void set_mr(u32 base, u32 cs, u32 mr_addr, u32 mr_val)
81 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
83 mr_addr |= cs << EMIF_REG_CS_SHIFT;
84 writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
85 writel(mr_val, &emif->emif_lpddr2_mode_reg_data);
88 void emif_reset_phy(u32 base)
90 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
93 iodft = readl(&emif->emif_iodft_tlgc);
94 iodft |= EMIF_REG_RESET_PHY_MASK;
95 writel(iodft, &emif->emif_iodft_tlgc);
98 static void do_lpddr2_init(u32 base, u32 cs)
101 const struct lpddr2_mr_regs *mr_regs;
103 get_lpddr2_mr_regs(&mr_regs);
104 /* Wait till device auto initialization is complete */
105 while (get_mr(base, cs, LPDDR2_MR0) & LPDDR2_MR0_DAI_MASK)
107 set_mr(base, cs, LPDDR2_MR10, mr_regs->mr10);
110 * Enough loops assuming a maximum of 2GHz
115 set_mr(base, cs, LPDDR2_MR1, mr_regs->mr1);
116 set_mr(base, cs, LPDDR2_MR16, mr_regs->mr16);
119 * Enable refresh along with writing MR2
120 * Encoding of RL in MR2 is (RL - 2)
122 mr_addr = LPDDR2_MR2 | EMIF_REG_REFRESH_EN_MASK;
123 set_mr(base, cs, mr_addr, mr_regs->mr2);
125 if (mr_regs->mr3 > 0)
126 set_mr(base, cs, LPDDR2_MR3, mr_regs->mr3);
129 static void lpddr2_init(u32 base, const struct emif_regs *regs)
131 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
134 clrbits_le32(&emif->emif_lpddr2_nvm_config, EMIF_REG_CS1NVMEN_MASK);
137 * Keep REG_INITREF_DIS = 1 to prevent re-initialization of SDRAM
138 * when EMIF_SDRAM_CONFIG register is written
140 setbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
143 * Set the SDRAM_CONFIG and PHY_CTRL for the
144 * un-locked frequency & default RL
146 writel(regs->sdram_config_init, &emif->emif_sdram_config);
147 writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
149 do_ext_phy_settings(base, regs);
151 do_lpddr2_init(base, CS0);
152 if (regs->sdram_config & EMIF_REG_EBANK_MASK)
153 do_lpddr2_init(base, CS1);
155 writel(regs->sdram_config, &emif->emif_sdram_config);
156 writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
158 /* Enable refresh now */
159 clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
163 __weak void do_ext_phy_settings(u32 base, const struct emif_regs *regs)
167 void emif_update_timings(u32 base, const struct emif_regs *regs)
169 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
172 writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl_shdw);
174 writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl_shdw);
176 writel(regs->sdram_tim1, &emif->emif_sdram_tim_1_shdw);
177 writel(regs->sdram_tim2, &emif->emif_sdram_tim_2_shdw);
178 writel(regs->sdram_tim3, &emif->emif_sdram_tim_3_shdw);
179 if (omap_revision() == OMAP4430_ES1_0) {
180 /* ES1 bug: EMIF should be in force idle during freq_update */
181 writel(0, &emif->emif_pwr_mgmt_ctrl);
183 writel(EMIF_PWR_MGMT_CTRL, &emif->emif_pwr_mgmt_ctrl);
184 writel(EMIF_PWR_MGMT_CTRL_SHDW, &emif->emif_pwr_mgmt_ctrl_shdw);
186 writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl_shdw);
187 writel(regs->zq_config, &emif->emif_zq_config);
188 writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
189 writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
191 if ((omap_revision() >= OMAP5430_ES1_0) || is_dra7xx()) {
192 writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0,
193 &emif->emif_l3_config);
194 } else if (omap_revision() >= OMAP4460_ES1_0) {
195 writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_3_LL_0,
196 &emif->emif_l3_config);
198 writel(EMIF_L3_CONFIG_VAL_SYS_10_LL_0,
199 &emif->emif_l3_config);
203 #ifndef CONFIG_OMAP44XX
204 static void omap5_ddr3_leveling(u32 base, const struct emif_regs *regs)
206 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
208 /* keep sdram in self-refresh */
209 writel(((LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT)
210 & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);
214 * Set invert_clkout (if activated) - DDR_PHYCTRL_1
215 * Inverting the clock adds an additional half-cycle delay on the
216 * command interface. The additional half cycle is usually
217 * meant to enable leveling in the situation where DQS is later
218 * than CK on the board. It also helps provide some additional
219 * margin for leveling.
221 writel(regs->emif_ddr_phy_ctlr_1,
222 &emif->emif_ddr_phy_ctrl_1);
224 writel(regs->emif_ddr_phy_ctlr_1,
225 &emif->emif_ddr_phy_ctrl_1_shdw);
228 writel(((LP_MODE_DISABLE << EMIF_REG_LP_MODE_SHIFT)
229 & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);
231 /* Launch Full leveling */
232 writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);
234 /* Wait till full leveling is complete */
235 readl(&emif->emif_rd_wr_lvl_ctl);
239 /* Configure the number of data eye leveling samples */
239 config_data_eye_leveling_samples(base);
242 * Launch 8 incremental WR_LVL- to compensate for
245 writel(0x2 << EMIF_REG_WRLVLINC_INT_SHIFT,
246 &emif->emif_rd_wr_lvl_ctl);
250 /* Launch Incremental leveling */
251 writel(DDR3_INC_LVL, &emif->emif_rd_wr_lvl_ctl);
255 static void update_hwleveling_output(u32 base, const struct emif_regs *regs)
257 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
258 u32 *emif_ext_phy_ctrl_reg, *emif_phy_status;
261 emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[6];
262 phy = readl(&emif->emif_ddr_phy_ctrl_1);
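/*
 * For each leveling result below, the PHY status value is copied to the
 * corresponding EXT_PHY_CTRL register and its shadow, which sit back to
 * back - hence each value is written twice.
 */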
264 /* Update PHY_REG_RDDQS_RATIO */
265 emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_7;
266 if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVL_MASK_MASK))
267 for (i = 0; i < PHY_RDDQS_RATIO_REGS; i++) {
268 reg = readl(emif_phy_status++);
269 writel(reg, emif_ext_phy_ctrl_reg++);
270 writel(reg, emif_ext_phy_ctrl_reg++);
273 /* Update PHY_REG_FIFO_WE_SLAVE_RATIO */
274 emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_2;
275 emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[11];
276 if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVLGATE_MASK_MASK))
277 for (i = 0; i < PHY_FIFO_WE_SLAVE_RATIO_REGS; i++) {
278 reg = readl(emif_phy_status++);
279 writel(reg, emif_ext_phy_ctrl_reg++);
280 writel(reg, emif_ext_phy_ctrl_reg++);
283 /* Update PHY_REG_WR_DQ/DQS_SLAVE_RATIO */
284 emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_12;
285 emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[16];
286 if (!(phy & EMIF_DDR_PHY_CTRL_1_WRLVL_MASK_MASK))
287 for (i = 0; i < PHY_REG_WR_DQ_SLAVE_RATIO_REGS; i++) {
288 reg = readl(emif_phy_status++);
289 writel(reg, emif_ext_phy_ctrl_reg++);
290 writel(reg, emif_ext_phy_ctrl_reg++);
293 /* Disable Leveling */
294 writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
295 writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
296 writel(0x0, &emif->emif_rd_wr_lvl_rmp_ctl);
299 static void dra7_ddr3_leveling(u32 base, const struct emif_regs *regs)
301 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
303 /* Clear Error Status */
304 clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36,
305 EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
306 EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);
308 clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36_shdw,
309 EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
310 EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);
312 /* Disable refreshes before leveling */
313 clrsetbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK,
314 EMIF_REG_INITREF_DIS_MASK);
316 /* Start Full leveling */
317 writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);
321 /* Check for leveling timeout */
322 if (readl(&emif->emif_status) & EMIF_REG_LEVELING_TO_MASK) {
323 printf("Leveling timeout on EMIF%d\n", emif_num(base));
327 /* Enable refreshes after leveling */
328 clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
330 debug("HW leveling success\n");
332 * Update slave ratios in EXT_PHY_CTRLx registers
333 * as per HW leveling output
335 update_hwleveling_output(base, regs);
338 static void dra7_reset_ddr_data(u32 base, u32 size)
340 #if defined(CONFIG_TI_EDMA3) && !defined(CONFIG_DMA)
341 enable_edma3_clocks();
343 edma3_fill(EDMA3_BASE, 1, (void *)base, 0, size);
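/* EDMA3 zero-fill is typically much faster than a CPU memset for large DDR regions */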
345 disable_edma3_clocks();
347 memset((void *)base, 0, size);
351 static void dra7_enable_ecc(u32 base, const struct emif_regs *regs)
353 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
354 u32 rgn, rgn_start, size, ctrl_reg;
356 /* ECC available only on dra76x EMIF1 */
357 if ((base != EMIF1_BASE) || !is_dra76x())
360 if (regs->emif_ecc_ctrl_reg & EMIF_ECC_CTRL_REG_ECC_EN_MASK) {
361 /* Disable high-order interleaving */
362 clrbits_le32(MA_PRIORITY, MA_HIMEM_INTERLEAVE_UN_MASK);
365 /* Clear the status flags and other history */
366 writel(readl(&emif->emif_1b_ecc_err_cnt),
367 &emif->emif_1b_ecc_err_cnt);
368 writel(0xffffffff, &emif->emif_1b_ecc_err_dist_1);
369 writel(0x2, &emif->emif_1b_ecc_err_addr_log);
370 writel(0x1, &emif->emif_2b_ecc_err_addr_log);
371 writel(EMIF_INT_WR_ECC_ERR_SYS_MASK |
372 EMIF_INT_TWOBIT_ECC_ERR_SYS_MASK |
373 EMIF_INT_ONEBIT_ECC_ERR_SYS_MASK,
374 &emif->emif_irqstatus_sys);
376 writel(regs->emif_ecc_address_range_1,
377 &emif->emif_ecc_address_range_1);
378 writel(regs->emif_ecc_address_range_2,
379 &emif->emif_ecc_address_range_2);
381 /* Disable RMW and ECC verification for read accesses */
382 ctrl_reg = (regs->emif_ecc_ctrl_reg &
383 ~EMIF_ECC_REG_RMW_EN_MASK) |
384 EMIF_ECC_CTRL_REG_ECC_VERIFY_DIS_MASK;
385 writel(ctrl_reg, &emif->emif_ecc_ctrl_reg);
387 /* Fill region 1 memory with 0 */
388 rgn_start = (regs->emif_ecc_address_range_1 &
389 EMIF_ECC_REG_ECC_START_ADDR_MASK) << 16;
390 rgn = rgn_start + CONFIG_SYS_SDRAM_BASE;
391 size = (regs->emif_ecc_address_range_1 &
392 EMIF_ECC_REG_ECC_END_ADDR_MASK) + 0x10000 - rgn_start;
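/*
 * ECC address ranges are programmed in 64 KiB granules: the start is
 * shifted up by 16 and the end is inclusive, hence the extra 0x10000.
 */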
394 if (regs->emif_ecc_ctrl_reg &
395 EMIF_ECC_REG_ECC_ADDR_RGN_1_EN_MASK)
396 dra7_reset_ddr_data(rgn, size);
398 /* Fill region 2 memory with 0 */
399 rgn_start = (regs->emif_ecc_address_range_2 &
400 EMIF_ECC_REG_ECC_START_ADDR_MASK) << 16;
401 rgn = rgn_start + CONFIG_SYS_SDRAM_BASE;
402 size = (regs->emif_ecc_address_range_2 &
403 EMIF_ECC_REG_ECC_END_ADDR_MASK) + 0x10000 - rgn_start;
405 if (regs->emif_ecc_ctrl_reg &
406 EMIF_ECC_REG_ECC_ADDR_RGN_2_EN_MASK)
407 dra7_reset_ddr_data(rgn, size);
409 /* Default value enables RMW and ECC verification */
410 writel(regs->emif_ecc_ctrl_reg, &emif->emif_ecc_ctrl_reg);
414 static void dra7_ddr3_init(u32 base, const struct emif_regs *regs)
416 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
419 emif_reset_phy(base);
420 writel(0x0, &emif->emif_pwr_mgmt_ctrl);
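/* Disable all EMIF low-power modes while the DDR is being initialized */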
422 do_ext_phy_settings(base, regs);
424 writel(regs->ref_ctrl | EMIF_REG_INITREF_DIS_MASK,
425 &emif->emif_sdram_ref_ctrl);
426 /* Update timing registers */
427 writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
428 writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
429 writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);
431 writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0, &emif->emif_l3_config);
432 writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);
433 writel(regs->zq_config, &emif->emif_zq_config);
434 writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
435 writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
436 writel(regs->emif_rd_wr_lvl_ctl, &emif->emif_rd_wr_lvl_ctl);
438 writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
439 writel(regs->emif_rd_wr_exec_thresh, &emif->emif_rd_wr_exec_thresh);
441 writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);
443 writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
444 writel(regs->sdram_config_init, &emif->emif_sdram_config);
448 writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl);
450 if (regs->emif_rd_wr_lvl_rmp_ctl & EMIF_REG_RDWRLVL_EN_MASK) {
452 * Perform Dummy ECC setup just to allow hardware
453 * leveling of ECC memories
455 if (is_dra76x() && (base == EMIF1_BASE) &&
456 (regs->emif_ecc_ctrl_reg & EMIF_ECC_CTRL_REG_ECC_EN_MASK)) {
457 writel(0, &emif->emif_ecc_address_range_1);
458 writel(0, &emif->emif_ecc_address_range_2);
459 writel(EMIF_ECC_CTRL_REG_ECC_EN_MASK |
460 EMIF_ECC_CTRL_REG_ECC_ADDR_RGN_PROT_MASK,
461 &emif->emif_ecc_ctrl_reg);
464 dra7_ddr3_leveling(base, regs);
468 writel(0, &emif->emif_ecc_ctrl_reg);
471 /* Enable ECC as necessary */
472 dra7_enable_ecc(base, regs);
475 static void omap5_ddr3_init(u32 base, const struct emif_regs *regs)
477 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
479 writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);
480 writel(regs->sdram_config_init, &emif->emif_sdram_config);
482 * Set SDRAM_CONFIG and PHY control registers to locked frequency
483 * and RL = 7. As the default values of the Mode Registers are not
484 * defined, contents of the Mode Registers must be fully initialized.
485 * H/W takes care of this initialization.
487 writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
489 /* Update timing registers */
490 writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
491 writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
492 writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);
494 writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);
496 writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
497 writel(regs->sdram_config_init, &emif->emif_sdram_config);
498 do_ext_phy_settings(base, regs);
500 writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
501 omap5_ddr3_leveling(base, regs);
504 static void ddr3_init(u32 base, const struct emif_regs *regs)
507 omap5_ddr3_init(base, regs);
509 dra7_ddr3_init(base, regs);
513 #ifndef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
514 #define print_timing_reg(reg) debug(#reg" - 0x%08x\n", (reg))
517 * Organization and refresh requirements for LPDDR2 devices of different
518 * types and densities. Derived from JESD209-2 section 2.4
520 const struct lpddr2_addressing addressing_table[] = {
521 /* Banks tREFIx10 rowx32,rowx16 colx32,colx16 density */
522 {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_7, COL_8} },/*64M */
523 {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_8, COL_9} },/*128M */
524 {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_8, COL_9} },/*256M */
525 {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*512M */
526 {BANKS8, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*1GS4 */
527 {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_9, COL_10} },/*2GS4 */
528 {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_10, COL_11} },/*4G */
529 {BANKS8, T_REFI_3_9, {ROW_15, ROW_15}, {COL_10, COL_11} },/*8G */
530 {BANKS4, T_REFI_7_8, {ROW_14, ROW_14}, {COL_9, COL_10} },/*1GS2 */
531 {BANKS4, T_REFI_3_9, {ROW_15, ROW_15}, {COL_9, COL_10} },/*2GS2 */
534 static const u32 lpddr2_density_2_size_in_mbytes[] = {
548 * Calculate the period of DDR clock from frequency value and set the
549 * denominator and numerator in global variables for easy access later
551 static void set_ddr_clk_period(u32 freq)
555 * period_in_ns = 10^9/freq
559 cancel_out(T_num, T_den, 200);
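/* e.g. freq = 400 MHz: T_num/T_den reduces to 5/2, i.e. a 2.5 ns clock period */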
564 * Convert time in nanoseconds to the number of DDR clock cycles
566 static inline u32 ns_2_cycles(u32 ns)
568 return ((ns * (*T_den)) + (*T_num) - 1) / (*T_num);
572 * ns_2_cycles with the difference that the time passed is 2 times the actual
573 * value (to avoid fractions). The number of cycles returned is for the
574 * original value of the timing parameter
576 static inline u32 ns_x2_2_cycles(u32 ns)
578 return ((ns * (*T_den)) + (*T_num) * 2 - 1) / ((*T_num) * 2);
582 * Find the addressing table index based on the device's type (S2 or S4) and
585 s8 addressing_table_index(u8 type, u8 density, u8 width)
588 if ((density > LPDDR2_DENSITY_8Gb) || (width == LPDDR2_IO_WIDTH_8))
592 * Look at the way ADDR_TABLE_INDEX* values have been defined
593 * in emif.h compared to LPDDR2_DENSITY_* values.
594 * The table is laid out in increasing order of density
595 * (ignoring type). The exceptions 1GS2 and 2GS2 have been placed
598 if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_1Gb))
599 index = ADDR_TABLE_INDEX1GS2;
600 else if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_2Gb))
601 index = ADDR_TABLE_INDEX2GS2;
605 debug("emif: addressing table index %d\n", index);
611 * Find the right timing table from the array of timing
612 * tables of the device using the DDR clock frequency
614 static const struct lpddr2_ac_timings *get_timings_table(const struct
615 lpddr2_ac_timings *const *device_timings,
618 u32 i, temp, freq_nearest;
619 const struct lpddr2_ac_timings *timings = 0;
621 emif_assert(freq <= MAX_LPDDR2_FREQ);
622 emif_assert(device_timings);
625 * Start with the maximum allowed frequency - that is always safe
627 freq_nearest = MAX_LPDDR2_FREQ;
629 * Find the timings table that has the max frequency value:
630 * i. Above or equal to the DDR frequency - safe
631 * ii. The lowest that satisfies condition (i) - optimal
633 for (i = 0; (i < MAX_NUM_SPEEDBINS) && device_timings[i]; i++) {
634 temp = device_timings[i]->max_freq;
635 if ((temp >= freq) && (temp <= freq_nearest)) {
637 timings = device_timings[i];
640 debug("emif: timings table: %d\n", freq_nearest);
645 * Finds the value of emif_sdram_config_reg
646 * All parameters are programmed based on the device on CS0.
647 * If there is a device on CS1, it will be the same as that on CS0 or
648 * it will be NVM. We don't support NVM yet.
649 * If cs1_device pointer is NULL it is assumed that there is no device
652 static u32 get_sdram_config_reg(const struct lpddr2_device_details *cs0_device,
653 const struct lpddr2_device_details *cs1_device,
654 const struct lpddr2_addressing *addressing,
659 config_reg |= (cs0_device->type + 4) << EMIF_REG_SDRAM_TYPE_SHIFT;
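/* MR8 type (0 = S4, 1 = S2) plus 4 gives the EMIF SDRAM_TYPE encoding for LPDDR2 */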
660 config_reg |= EMIF_INTERLEAVING_POLICY_MAX_INTERLEAVING <<
661 EMIF_REG_IBANK_POS_SHIFT;
663 config_reg |= cs0_device->io_width << EMIF_REG_NARROW_MODE_SHIFT;
665 config_reg |= RL << EMIF_REG_CL_SHIFT;
667 config_reg |= addressing->row_sz[cs0_device->io_width] <<
668 EMIF_REG_ROWSIZE_SHIFT;
670 config_reg |= addressing->num_banks << EMIF_REG_IBANK_SHIFT;
672 config_reg |= (cs1_device ? EBANK_CS1_EN : EBANK_CS1_DIS) <<
673 EMIF_REG_EBANK_SHIFT;
675 config_reg |= addressing->col_sz[cs0_device->io_width] <<
676 EMIF_REG_PAGESIZE_SHIFT;
681 static u32 get_sdram_ref_ctrl(u32 freq,
682 const struct lpddr2_addressing *addressing)
684 u32 ref_ctrl = 0, val = 0, freq_khz;
685 freq_khz = freq / 1000;
687 * refresh rate to be set is tREFI (in us) * freq in MHz;
688 * division by 10000 accounts for kHz and the x10 in t_REFI_us_x10
690 val = addressing->t_REFI_us_x10 * freq_khz / 10000;
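/* e.g. tREFI = 7.8 us (t_REFI_us_x10 = 78) at 400 MHz: 78 * 400000 / 10000 = 3120 cycles */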
691 ref_ctrl |= val << EMIF_REG_REFRESH_RATE_SHIFT;
696 static u32 get_sdram_tim_1_reg(const struct lpddr2_ac_timings *timings,
697 const struct lpddr2_min_tck *min_tck,
698 const struct lpddr2_addressing *addressing)
700 u32 tim1 = 0, val = 0;
701 val = max(min_tck->tWTR, ns_x2_2_cycles(timings->tWTRx2)) - 1;
702 tim1 |= val << EMIF_REG_T_WTR_SHIFT;
704 if (addressing->num_banks == BANKS8)
705 val = (timings->tFAW * (*T_den) + 4 * (*T_num) - 1) /
708 val = max(min_tck->tRRD, ns_2_cycles(timings->tRRD)) - 1;
710 tim1 |= val << EMIF_REG_T_RRD_SHIFT;
712 val = ns_2_cycles(timings->tRASmin + timings->tRPab) - 1;
713 tim1 |= val << EMIF_REG_T_RC_SHIFT;
715 val = max(min_tck->tRAS_MIN, ns_2_cycles(timings->tRASmin)) - 1;
716 tim1 |= val << EMIF_REG_T_RAS_SHIFT;
718 val = max(min_tck->tWR, ns_2_cycles(timings->tWR)) - 1;
719 tim1 |= val << EMIF_REG_T_WR_SHIFT;
721 val = max(min_tck->tRCD, ns_2_cycles(timings->tRCD)) - 1;
722 tim1 |= val << EMIF_REG_T_RCD_SHIFT;
724 val = max(min_tck->tRP_AB, ns_2_cycles(timings->tRPab)) - 1;
725 tim1 |= val << EMIF_REG_T_RP_SHIFT;
730 static u32 get_sdram_tim_2_reg(const struct lpddr2_ac_timings *timings,
731 const struct lpddr2_min_tck *min_tck)
733 u32 tim2 = 0, val = 0;
734 val = max(min_tck->tCKE, timings->tCKE) - 1;
735 tim2 |= val << EMIF_REG_T_CKE_SHIFT;
737 val = max(min_tck->tRTP, ns_x2_2_cycles(timings->tRTPx2)) - 1;
738 tim2 |= val << EMIF_REG_T_RTP_SHIFT;
741 * tXSRD = tRFCab + 10 ns. XSRD and XSNR should have the
744 val = ns_2_cycles(timings->tXSR) - 1;
745 tim2 |= val << EMIF_REG_T_XSRD_SHIFT;
746 tim2 |= val << EMIF_REG_T_XSNR_SHIFT;
748 val = max(min_tck->tXP, ns_x2_2_cycles(timings->tXPx2)) - 1;
749 tim2 |= val << EMIF_REG_T_XP_SHIFT;
754 static u32 get_sdram_tim_3_reg(const struct lpddr2_ac_timings *timings,
755 const struct lpddr2_min_tck *min_tck,
756 const struct lpddr2_addressing *addressing)
758 u32 tim3 = 0, val = 0;
759 val = min(timings->tRASmax * 10 / addressing->t_REFI_us_x10 - 1, 0xF);
760 tim3 |= val << EMIF_REG_T_RAS_MAX_SHIFT;
762 val = ns_2_cycles(timings->tRFCab) - 1;
763 tim3 |= val << EMIF_REG_T_RFC_SHIFT;
765 val = ns_x2_2_cycles(timings->tDQSCKMAXx2) - 1;
766 tim3 |= val << EMIF_REG_T_TDQSCKMAX_SHIFT;
768 val = ns_2_cycles(timings->tZQCS) - 1;
769 tim3 |= val << EMIF_REG_ZQ_ZQCS_SHIFT;
771 val = max(min_tck->tCKESR, ns_2_cycles(timings->tCKESR)) - 1;
772 tim3 |= val << EMIF_REG_T_CKESR_SHIFT;
777 static u32 get_zq_config_reg(const struct lpddr2_device_details *cs1_device,
778 const struct lpddr2_addressing *addressing,
784 EMIF_ZQCS_INTERVAL_DVFS_IN_US * 10 /
785 addressing->t_REFI_us_x10;
788 EMIF_ZQCS_INTERVAL_NORMAL_IN_US * 10 /
789 addressing->t_REFI_us_x10;
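/* The ZQCS interval is programmed in units of refresh periods (interval in us / tREFI in us) */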
790 zq |= val << EMIF_REG_ZQ_REFINTERVAL_SHIFT;
792 zq |= (REG_ZQ_ZQCL_MULT - 1) << EMIF_REG_ZQ_ZQCL_MULT_SHIFT;
794 zq |= (REG_ZQ_ZQINIT_MULT - 1) << EMIF_REG_ZQ_ZQINIT_MULT_SHIFT;
796 zq |= REG_ZQ_SFEXITEN_ENABLE << EMIF_REG_ZQ_SFEXITEN_SHIFT;
799 * Assuming that the two chip selects share a single calibration resistor.
800 * If there are indeed two calibration resistors, then this flag should
801 * be enabled to take advantage of the dual calibration feature.
802 * This data should ideally come from board files. But considering
803 * that none of the boards today have calibration resistors per CS,
804 * it would be an unnecessary overhead.
806 zq |= REG_ZQ_DUALCALEN_DISABLE << EMIF_REG_ZQ_DUALCALEN_SHIFT;
808 zq |= REG_ZQ_CS0EN_ENABLE << EMIF_REG_ZQ_CS0EN_SHIFT;
810 zq |= (cs1_device ? 1 : 0) << EMIF_REG_ZQ_CS1EN_SHIFT;
815 static u32 get_temp_alert_config(const struct lpddr2_device_details *cs1_device,
816 const struct lpddr2_addressing *addressing,
819 u32 alert = 0, interval;
821 TEMP_ALERT_POLL_INTERVAL_MS * 10000 / addressing->t_REFI_us_x10;
824 alert |= interval << EMIF_REG_TA_REFINTERVAL_SHIFT;
826 alert |= TEMP_ALERT_CONFIG_DEVCT_1 << EMIF_REG_TA_DEVCNT_SHIFT;
828 alert |= TEMP_ALERT_CONFIG_DEVWDT_32 << EMIF_REG_TA_DEVWDT_SHIFT;
830 alert |= 1 << EMIF_REG_TA_SFEXITEN_SHIFT;
832 alert |= 1 << EMIF_REG_TA_CS0EN_SHIFT;
834 alert |= (cs1_device ? 1 : 0) << EMIF_REG_TA_CS1EN_SHIFT;
839 static u32 get_read_idle_ctrl_reg(u8 volt_ramp)
841 u32 idle = 0, val = 0;
843 val = ns_2_cycles(READ_IDLE_INTERVAL_DVFS) / 64 - 1;
845 /* Maximum value in normal conditions - suggested by HW team */
847 idle |= val << EMIF_REG_READ_IDLE_INTERVAL_SHIFT;
849 idle |= EMIF_REG_READ_IDLE_LEN_VAL << EMIF_REG_READ_IDLE_LEN_SHIFT;
854 static u32 get_ddr_phy_ctrl_1(u32 freq, u8 RL)
856 u32 phy = 0, val = 0;
858 phy |= (RL + 2) << EMIF_REG_READ_LATENCY_SHIFT;
860 if (freq <= 100000000)
861 val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS;
862 else if (freq <= 200000000)
863 val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ;
865 val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ;
866 phy |= val << EMIF_REG_DLL_SLAVE_DLY_CTRL_SHIFT;
868 /* Other fields are constant magic values. Hardcode them together */
869 phy |= EMIF_DDR_PHY_CTRL_1_BASE_VAL <<
870 EMIF_EMIF_DDR_PHY_CTRL_1_BASE_VAL_SHIFT;
875 static u32 get_emif_mem_size(u32 base)
877 u32 size_mbytes = 0, temp;
878 struct emif_device_details dev_details;
879 struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
880 u32 emif_nr = emif_num(base);
882 emif_reset_phy(base);
883 dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
885 dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
887 emif_reset_phy(base);
889 if (dev_details.cs0_device_details) {
890 temp = dev_details.cs0_device_details->density;
891 size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
894 if (dev_details.cs1_device_details) {
895 temp = dev_details.cs1_device_details->density;
896 size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
898 /* convert to bytes */
899 return size_mbytes << 20;
902 /* Gets the encoding corresponding to a given DMM section size */
903 u32 get_dmm_section_size_map(u32 section_size)
906 * Section size mapping:
907 * 0x0: 16-MiB section
908 * 0x1: 32-MiB section
909 * 0x2: 64-MiB section
910 * 0x3: 128-MiB section
911 * 0x4: 256-MiB section
912 * 0x5: 512-MiB section
916 section_size >>= 24; /* divide by 16 MB */
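/* e.g. 512 MiB: 0x20000000 >> 24 = 32, log2(32) = 5 -> encoding 0x5 */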
917 return log_2_n_round_down(section_size);
920 static void emif_calculate_regs(
921 const struct emif_device_details *emif_dev_details,
922 u32 freq, struct emif_regs *regs)
925 const struct lpddr2_addressing *addressing;
926 const struct lpddr2_ac_timings *timings;
927 const struct lpddr2_min_tck *min_tck;
928 const struct lpddr2_device_details *cs0_dev_details =
929 emif_dev_details->cs0_device_details;
930 const struct lpddr2_device_details *cs1_dev_details =
931 emif_dev_details->cs1_device_details;
932 const struct lpddr2_device_timings *cs0_dev_timings =
933 emif_dev_details->cs0_device_timings;
935 emif_assert(emif_dev_details);
938 * You cannot have a device on CS1 without one on CS0.
939 * So configuring EMIF without a device on CS0 doesn't
942 emif_assert(cs0_dev_details);
943 emif_assert(cs0_dev_details->type != LPDDR2_TYPE_NVM);
945 * If there is a device on CS1 it should be the same type as CS0
946 * (or NVM, but NVM is not supported in this driver yet)
948 emif_assert((cs1_dev_details == NULL) ||
949 (cs1_dev_details->type == LPDDR2_TYPE_NVM) ||
950 (cs0_dev_details->type == cs1_dev_details->type));
951 emif_assert(freq <= MAX_LPDDR2_FREQ);
953 set_ddr_clk_period(freq);
956 * The device on CS0 is used for all timing calculations
957 * There is only one set of registers for timings per EMIF. So, if the
958 * second CS (CS1) has a device, it should have the same timings as the
961 timings = get_timings_table(cs0_dev_timings->ac_timings, freq);
962 emif_assert(timings);
963 min_tck = cs0_dev_timings->min_tck;
965 temp = addressing_table_index(cs0_dev_details->type,
966 cs0_dev_details->density,
967 cs0_dev_details->io_width);
969 emif_assert((temp >= 0));
970 addressing = &(addressing_table[temp]);
971 emif_assert(addressing);
973 sys_freq = get_sys_clk_freq();
975 regs->sdram_config_init = get_sdram_config_reg(cs0_dev_details,
977 addressing, RL_BOOT);
979 regs->sdram_config = get_sdram_config_reg(cs0_dev_details,
981 addressing, RL_FINAL);
983 regs->ref_ctrl = get_sdram_ref_ctrl(freq, addressing);
985 regs->sdram_tim1 = get_sdram_tim_1_reg(timings, min_tck, addressing);
987 regs->sdram_tim2 = get_sdram_tim_2_reg(timings, min_tck);
989 regs->sdram_tim3 = get_sdram_tim_3_reg(timings, min_tck, addressing);
991 regs->read_idle_ctrl = get_read_idle_ctrl_reg(LPDDR2_VOLTAGE_STABLE);
993 regs->temp_alert_config =
994 get_temp_alert_config(cs1_dev_details, addressing, 0);
996 regs->zq_config = get_zq_config_reg(cs1_dev_details, addressing,
997 LPDDR2_VOLTAGE_STABLE);
999 regs->emif_ddr_phy_ctlr_1_init =
1000 get_ddr_phy_ctrl_1(sys_freq / 2, RL_BOOT);
1002 regs->emif_ddr_phy_ctlr_1 =
1003 get_ddr_phy_ctrl_1(freq, RL_FINAL);
1007 print_timing_reg(regs->sdram_config_init);
1008 print_timing_reg(regs->sdram_config);
1009 print_timing_reg(regs->ref_ctrl);
1010 print_timing_reg(regs->sdram_tim1);
1011 print_timing_reg(regs->sdram_tim2);
1012 print_timing_reg(regs->sdram_tim3);
1013 print_timing_reg(regs->read_idle_ctrl);
1014 print_timing_reg(regs->temp_alert_config);
1015 print_timing_reg(regs->zq_config);
1016 print_timing_reg(regs->emif_ddr_phy_ctlr_1);
1017 print_timing_reg(regs->emif_ddr_phy_ctlr_1_init);
1019 #endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */
1021 #ifdef CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION
1022 const char *get_lpddr2_type(u8 type_id)
1025 case LPDDR2_TYPE_S4:
1027 case LPDDR2_TYPE_S2:
1034 const char *get_lpddr2_io_width(u8 width_id)
1037 case LPDDR2_IO_WIDTH_8:
1039 case LPDDR2_IO_WIDTH_16:
1041 case LPDDR2_IO_WIDTH_32:
1048 const char *get_lpddr2_manufacturer(u32 manufacturer)
1050 switch (manufacturer) {
1051 case LPDDR2_MANUFACTURER_SAMSUNG:
1053 case LPDDR2_MANUFACTURER_QIMONDA:
1055 case LPDDR2_MANUFACTURER_ELPIDA:
1057 case LPDDR2_MANUFACTURER_ETRON:
1059 case LPDDR2_MANUFACTURER_NANYA:
1061 case LPDDR2_MANUFACTURER_HYNIX:
1063 case LPDDR2_MANUFACTURER_MOSEL:
1065 case LPDDR2_MANUFACTURER_WINBOND:
1067 case LPDDR2_MANUFACTURER_ESMT:
1069 case LPDDR2_MANUFACTURER_SPANSION:
1071 case LPDDR2_MANUFACTURER_SST:
1073 case LPDDR2_MANUFACTURER_ZMOS:
1075 case LPDDR2_MANUFACTURER_INTEL:
1077 case LPDDR2_MANUFACTURER_NUMONYX:
1079 case LPDDR2_MANUFACTURER_MICRON:
1086 static void display_sdram_details(u32 emif_nr, u32 cs,
1087 struct lpddr2_device_details *device)
1089 const char *mfg_str;
1090 const char *type_str;
1091 char density_str[10];
1094 debug("EMIF%d CS%d\t", emif_nr, cs);
1101 mfg_str = get_lpddr2_manufacturer(device->manufacturer);
1102 type_str = get_lpddr2_type(device->type);
1104 density = lpddr2_density_2_size_in_mbytes[device->density];
1105 if ((density / 1024 * 1024) == density) {
1107 sprintf(density_str, "%d GB", density);
1109 sprintf(density_str, "%d MB", density);
1110 if (mfg_str && type_str)
1111 debug("%s\t\t%s\t%s\n", mfg_str, type_str, density_str);
1114 static u8 is_lpddr2_sdram_present(u32 base, u32 cs,
1115 struct lpddr2_device_details *lpddr2_device)
1119 mr = get_mr(base, cs, LPDDR2_MR0);
1121 /* Mode register value bigger than 8 bit */
1125 temp = (mr & LPDDR2_MR0_DI_MASK) >> LPDDR2_MR0_DI_SHIFT;
1130 temp = (mr & LPDDR2_MR0_DNVI_MASK) >> LPDDR2_MR0_DNVI_SHIFT;
1133 /* DNV supported - But DNV is only supported for NVM */
1137 mr = get_mr(base, cs, LPDDR2_MR4);
1139 /* Mode register value bigger than 8 bit */
1143 mr = get_mr(base, cs, LPDDR2_MR5);
1145 /* Mode register value bigger than 8 bit */
1149 if (!get_lpddr2_manufacturer(mr)) {
1150 /* Manufacturer not identified */
1153 lpddr2_device->manufacturer = mr;
1155 mr = get_mr(base, cs, LPDDR2_MR6);
1157 /* Mode register value bigger than 8 bit */
1161 mr = get_mr(base, cs, LPDDR2_MR7);
1163 /* Mode register value bigger than 8 bit */
1167 mr = get_mr(base, cs, LPDDR2_MR8);
1169 /* Mode register value bigger than 8 bit */
1173 temp = (mr & MR8_TYPE_MASK) >> MR8_TYPE_SHIFT;
1174 if (!get_lpddr2_type(temp)) {
1178 lpddr2_device->type = temp;
1180 temp = (mr & MR8_DENSITY_MASK) >> MR8_DENSITY_SHIFT;
1181 if (temp > LPDDR2_DENSITY_32Gb) {
1182 /* Density not supported */
1185 lpddr2_device->density = temp;
1187 temp = (mr & MR8_IO_WIDTH_MASK) >> MR8_IO_WIDTH_SHIFT;
1188 if (!get_lpddr2_io_width(temp)) {
1189 /* IO width unsupported value */
1192 lpddr2_device->io_width = temp;
1195 * If all the above tests pass we should
1196 * have a device on this chip-select
1201 struct lpddr2_device_details *emif_get_device_details(u32 emif_nr, u8 cs,
1202 struct lpddr2_device_details *lpddr2_dev_details)
1205 u32 base = (emif_nr == 1) ? EMIF1_BASE : EMIF2_BASE;
1207 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
1209 if (!lpddr2_dev_details)
1212 /* Do the minimum init for mode register accesses */
1213 if (!(running_from_sdram() || warm_reset())) {
1214 phy = get_ddr_phy_ctrl_1(get_sys_clk_freq() / 2, RL_BOOT);
1215 writel(phy, &emif->emif_ddr_phy_ctrl_1);
1218 if (!(is_lpddr2_sdram_present(base, cs, lpddr2_dev_details)))
1221 display_sdram_details(emif_num(base), cs, lpddr2_dev_details);
1223 return lpddr2_dev_details;
1225 #endif /* CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION */
1227 static void do_sdram_init(u32 base)
1229 const struct emif_regs *regs;
1230 u32 in_sdram, emif_nr;
1232 debug(">>do_sdram_init() %x\n", base);
1234 in_sdram = running_from_sdram();
1235 emif_nr = (base == EMIF1_BASE) ? 1 : 2;
1237 #ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
1238 emif_get_reg_dump(emif_nr, &regs);
1240 debug("EMIF: reg dump not provided\n");
1245 * The user has not provided the register values. We need to
1246 * calculate them based on the timings and the DDR frequency
1248 struct emif_device_details dev_details;
1249 struct emif_regs calculated_regs;
1252 * Get device details:
1253 * - Discovered if CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION is set
1254 * - Obtained from user otherwise
1256 struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
1257 emif_reset_phy(base);
1258 dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
1260 dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
1262 emif_reset_phy(base);
1264 /* Return if no devices on this EMIF */
1265 if (!dev_details.cs0_device_details &&
1266 !dev_details.cs1_device_details) {
1271 * Get device timings:
1272 * - Default timings specified by JESD209-2 if
1273 * CONFIG_SYS_DEFAULT_LPDDR2_TIMINGS is set
1274 * - Obtained from user otherwise
1276 emif_get_device_timings(emif_nr, &dev_details.cs0_device_timings,
1277 &dev_details.cs1_device_timings);
1279 /* Calculate the register values */
1280 emif_calculate_regs(&dev_details, omap_ddr_clk(), &calculated_regs);
1281 regs = &calculated_regs;
1282 #endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */
1285 * Initializing the DDR device cannot happen from SDRAM.
1286 * Changing the timing registers in EMIF can happen (going from one
1289 if (!in_sdram && (!warm_reset() || is_dra7xx())) {
1290 if (emif_sdram_type(regs->sdram_config) ==
1291 EMIF_SDRAM_TYPE_LPDDR2)
1292 lpddr2_init(base, regs);
1293 #ifndef CONFIG_OMAP44XX
1295 ddr3_init(base, regs);
1298 #ifdef CONFIG_OMAP54XX
1299 if (warm_reset() && (emif_sdram_type(regs->sdram_config) ==
1300 EMIF_SDRAM_TYPE_DDR3) && !is_dra7xx()) {
1301 set_lpmode_selfrefresh(base);
1302 emif_reset_phy(base);
1303 omap5_ddr3_leveling(base, regs);
1307 /* Write to the shadow registers */
1308 emif_update_timings(base, regs);
1310 debug("<<do_sdram_init() %x\n", base);
1313 void emif_post_init_config(u32 base)
1315 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
1316 u32 omap_rev = omap_revision();
1318 /* reset phy on ES2.0 */
1319 if (omap_rev == OMAP4430_ES2_0)
1320 emif_reset_phy(base);
1322 /* Put EMIF back in smart idle on ES1.0 */
1323 if (omap_rev == OMAP4430_ES1_0)
1324 writel(0x80000000, &emif->emif_pwr_mgmt_ctrl);
1327 void dmm_init(u32 base)
1329 const struct dmm_lisa_map_regs *lisa_map_regs;
1330 u32 i, section, valid;
1332 #ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
1333 emif_get_dmm_regs(&lisa_map_regs);
1335 u32 emif1_size, emif2_size, mapped_size, section_map = 0;
1336 u32 section_cnt, sys_addr;
1337 struct dmm_lisa_map_regs lis_map_regs_calculated = {0};
1341 sys_addr = CONFIG_SYS_SDRAM_BASE;
1342 emif1_size = get_emif_mem_size(EMIF1_BASE);
1343 emif2_size = get_emif_mem_size(EMIF2_BASE);
1344 debug("emif1_size 0x%x emif2_size 0x%x\n", emif1_size, emif2_size);
1346 if (!emif1_size && !emif2_size)
1349 /* symmetric interleaved section */
1350 if (emif1_size && emif2_size) {
1351 mapped_size = min(emif1_size, emif2_size);
1352 section_map = DMM_LISA_MAP_INTERLEAVED_BASE_VAL;
1353 section_map |= 0 << EMIF_SDRC_ADDR_SHIFT;
1355 section_map |= (sys_addr >> 24) <<
1356 EMIF_SYS_ADDR_SHIFT;
1357 section_map |= get_dmm_section_size_map(mapped_size * 2)
1358 << EMIF_SYS_SIZE_SHIFT;
1359 lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
1360 emif1_size -= mapped_size;
1361 emif2_size -= mapped_size;
1362 sys_addr += (mapped_size * 2);
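/* The interleaved section consumes mapped_size from each EMIF, i.e. twice that in the system address map */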
1367 * Single EMIF section (we can have a maximum of 1 single-EMIF
1368 * section - either EMIF1 or EMIF2 or none, but not both)
1371 section_map = DMM_LISA_MAP_EMIF1_ONLY_BASE_VAL;
1372 section_map |= get_dmm_section_size_map(emif1_size)
1373 << EMIF_SYS_SIZE_SHIFT;
1375 section_map |= (mapped_size >> 24) <<
1376 EMIF_SDRC_ADDR_SHIFT;
1378 section_map |= (sys_addr >> 24) << EMIF_SYS_ADDR_SHIFT;
1382 section_map = DMM_LISA_MAP_EMIF2_ONLY_BASE_VAL;
1383 section_map |= get_dmm_section_size_map(emif2_size) <<
1384 EMIF_SYS_SIZE_SHIFT;
1386 section_map |= mapped_size >> 24 << EMIF_SDRC_ADDR_SHIFT;
1388 section_map |= sys_addr >> 24 << EMIF_SYS_ADDR_SHIFT;
1392 if (section_cnt == 2) {
1393 /* Only 1 section - either symmetric or single EMIF */
1394 lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
1395 lis_map_regs_calculated.dmm_lisa_map_2 = 0;
1396 lis_map_regs_calculated.dmm_lisa_map_1 = 0;
1398 /* 2 sections - 1 symmetric, 1 single EMIF */
1399 lis_map_regs_calculated.dmm_lisa_map_2 = section_map;
1400 lis_map_regs_calculated.dmm_lisa_map_1 = 0;
1403 /* TRAP for invalid TILER mappings in section 0 */
1404 lis_map_regs_calculated.dmm_lisa_map_0 = DMM_LISA_MAP_0_INVAL_ADDR_TRAP;
1406 if (omap_revision() >= OMAP4460_ES1_0)
1407 lis_map_regs_calculated.is_ma_present = 1;
1409 lisa_map_regs = &lis_map_regs_calculated;
1411 struct dmm_lisa_map_regs *hw_lisa_map_regs =
1412 (struct dmm_lisa_map_regs *)base;
1414 writel(0, &hw_lisa_map_regs->dmm_lisa_map_3);
1415 writel(0, &hw_lisa_map_regs->dmm_lisa_map_2);
1416 writel(0, &hw_lisa_map_regs->dmm_lisa_map_1);
1417 writel(0, &hw_lisa_map_regs->dmm_lisa_map_0);
1419 writel(lisa_map_regs->dmm_lisa_map_3,
1420 &hw_lisa_map_regs->dmm_lisa_map_3);
1421 writel(lisa_map_regs->dmm_lisa_map_2,
1422 &hw_lisa_map_regs->dmm_lisa_map_2);
1423 writel(lisa_map_regs->dmm_lisa_map_1,
1424 &hw_lisa_map_regs->dmm_lisa_map_1);
1425 writel(lisa_map_regs->dmm_lisa_map_0,
1426 &hw_lisa_map_regs->dmm_lisa_map_0);
1428 if (lisa_map_regs->is_ma_present) {
1430 (struct dmm_lisa_map_regs *)MA_BASE;
1432 writel(lisa_map_regs->dmm_lisa_map_3,
1433 &hw_lisa_map_regs->dmm_lisa_map_3);
1434 writel(lisa_map_regs->dmm_lisa_map_2,
1435 &hw_lisa_map_regs->dmm_lisa_map_2);
1436 writel(lisa_map_regs->dmm_lisa_map_1,
1437 &hw_lisa_map_regs->dmm_lisa_map_1);
1438 writel(lisa_map_regs->dmm_lisa_map_0,
1439 &hw_lisa_map_regs->dmm_lisa_map_0);
1441 setbits_le32(MA_PRIORITY, MA_HIMEM_INTERLEAVE_UN_MASK);
1445 * EMIF should be configured only when
1446 * memory is mapped on it. Using emif1_enabled
1447 * and emif2_enabled variables for this.
1451 for (i = 0; i < 4; i++) {
1452 section = __raw_readl(DMM_BASE + i*4);
1453 valid = (section & EMIF_SDRC_MAP_MASK) >>
1454 (EMIF_SDRC_MAP_SHIFT);
1469 static void do_bug0039_workaround(u32 base)
1471 u32 val, i, clkctrl;
1472 struct emif_reg_struct *emif_base = (struct emif_reg_struct *)base;
1473 const struct read_write_regs *bug_00339_regs;
1475 u32 *phy_status_base = &emif_base->emif_ddr_phy_status[0];
1476 u32 *phy_ctrl_base = &emif_base->emif_ddr_ext_phy_ctrl_1;
1481 bug_00339_regs = get_bug_regs(&iterations);
1483 /* Put EMIF into idle */
1484 clkctrl = __raw_readl((*prcm)->cm_memif_clkstctrl);
1485 __raw_writel(0x0, (*prcm)->cm_memif_clkstctrl);
1487 /* Copy the PHY status registers into the PHY ctrl shadow registers */
1488 for (i = 0; i < iterations; i++) {
1489 val = __raw_readl(phy_status_base +
1490 bug_00339_regs[i].read_reg - 1);
1492 __raw_writel(val, phy_ctrl_base +
1493 ((bug_00339_regs[i].write_reg - 1) << 1));
1495 __raw_writel(val, phy_ctrl_base +
1496 (bug_00339_regs[i].write_reg << 1) - 1);
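/*
 * write_reg is 1-based; each ext_phy_ctrl register is followed by its
 * shadow, so ctrl N is at offset (N - 1) * 2 and its shadow right after.
 */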
1499 /* Disable leveling */
1500 writel(0x0, &emif_base->emif_rd_wr_lvl_rmp_ctl);
1502 __raw_writel(clkctrl, (*prcm)->cm_memif_clkstctrl);
1506 * SDRAM initialization:
1507 * SDRAM initialization has two parts:
1508 * 1. Configuring the SDRAM device
1509 * 2. Updating the AC timing related parameters in the EMIF module
1510 * (1) should be done only once and should not be done while we are
1511 * running from SDRAM.
1512 * (2) can and should be done more than once if OPP changes.
1513 * In particular, this may be needed when we boot without SPL and
1514 * use the Configuration Header (CH). The ROM code supports only 50% OPP
1515 * at boot (low power boot), so U-Boot has to switch to OPP100 and update
1516 * the frequency. So,
1517 * Doing (1) and (2) makes sense - first-time initialization
1518 * Doing (2) and not (1) makes sense - OPP change (when using CH)
1519 * Doing (1) and not (2) doesn't make sense
1520 * See do_sdram_init() for the details
1522 void sdram_init(void)
1524 u32 in_sdram, size_prog, size_detect;
1525 struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;
1526 u32 sdram_type = emif_sdram_type(emif->emif_sdram_config);
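/* Both EMIFs are expected to use the same SDRAM type, so EMIF1's config is taken as representative */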
1528 debug(">>sdram_init()\n");
1530 if (omap_hw_init_context() == OMAP_INIT_CONTEXT_UBOOT_AFTER_SPL)
1533 in_sdram = running_from_sdram();
1534 debug("in_sdram = %d\n", in_sdram);
1537 if ((sdram_type == EMIF_SDRAM_TYPE_LPDDR2) && !warm_reset())
1538 bypass_dpll((*prcm)->cm_clkmode_dpll_core);
1539 else if (sdram_type == EMIF_SDRAM_TYPE_DDR3)
1540 writel(CM_DLL_CTRL_NO_OVERRIDE, (*prcm)->cm_dll_ctrl);
1547 do_sdram_init(EMIF1_BASE);
1550 do_sdram_init(EMIF2_BASE);
1552 if (!(in_sdram || warm_reset())) {
1554 emif_post_init_config(EMIF1_BASE);
1556 emif_post_init_config(EMIF2_BASE);
1559 /* for the shadow registers to take effect */
1560 if (sdram_type == EMIF_SDRAM_TYPE_LPDDR2)
1563 /* Do some testing after the init */
1565 size_prog = omap_sdram_size();
1566 size_prog = log_2_n_round_down(size_prog);
1567 size_prog = (1 << size_prog);
1569 size_detect = get_ram_size((long *)CONFIG_SYS_SDRAM_BASE,
1571 /* Compare with the size programmed */
1572 if (size_detect != size_prog) {
1573 printf("SDRAM: identified size not same as expected size;"
1574 " identified: %x expected: %x\n",
1578 debug("get_ram_size() successful");
1581 #if defined(CONFIG_TI_SECURE_DEVICE)
1583 * On HS devices, do static EMIF firewall configuration
1584 * but only do it if not already running in SDRAM
1587 if (0 != secure_emif_reserve())
1590 /* On HS devices, ensure static EMIF firewall APIs are locked */
1591 if (0 != secure_emif_firewall_lock())
1595 if (sdram_type == EMIF_SDRAM_TYPE_DDR3 &&
1596 (!in_sdram && !warm_reset()) && (!is_dra7xx())) {
1598 do_bug0039_workaround(EMIF1_BASE);
1600 do_bug0039_workaround(EMIF2_BASE);
1603 debug("<<sdram_init()\n");