1 // SPDX-License-Identifier: GPL-2.0+
6 * Texas Instruments, <www.ti.com>
8 * Aneesh V <aneesh@ti.com>
17 #include <asm/arch/clock.h>
18 #include <asm/arch/sys_proto.h>
19 #include <asm/omap_common.h>
20 #include <asm/omap_sec_common.h>
21 #include <asm/utils.h>
22 #include <linux/compiler.h>
23 #include <asm/ti-common/ti-edma3.h>
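/*
 * Per-EMIF flags recording whether memory is mapped on EMIF1/EMIF2.
 * They stay at -1 until dmm_init() has inspected the LISA section
 * mappings and sets each one to 1 (memory mapped) or 0 (unused).
 */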
25 static int emif1_enabled = -1, emif2_enabled = -1;
27 void set_lpmode_selfrefresh(u32 base)
29 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
32 reg = readl(&emif->emif_pwr_mgmt_ctrl);
33 reg &= ~EMIF_REG_LP_MODE_MASK;
34 reg |= LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT;
35 reg &= ~EMIF_REG_SR_TIM_MASK;
36 writel(reg, &emif->emif_pwr_mgmt_ctrl);
38 /* dummy read for the new SR_TIM to be loaded */
39 readl(&emif->emif_pwr_mgmt_ctrl);
void force_emif_self_refresh(void)
44 set_lpmode_selfrefresh(EMIF1_BASE);
46 set_lpmode_selfrefresh(EMIF2_BASE);
49 inline u32 emif_num(u32 base)
51 if (base == EMIF1_BASE)
53 else if (base == EMIF2_BASE)
59 static inline u32 get_mr(u32 base, u32 cs, u32 mr_addr)
62 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
64 mr_addr |= cs << EMIF_REG_CS_SHIFT;
65 writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
66 if (omap_revision() == OMAP4430_ES2_0)
67 mr = readl(&emif->emif_lpddr2_mode_reg_data_es2);
69 mr = readl(&emif->emif_lpddr2_mode_reg_data);
70 debug("get_mr: EMIF%d cs %d mr %08x val 0x%x\n", emif_num(base),
72 if (((mr & 0x0000ff00) >> 8) == (mr & 0xff) &&
73 ((mr & 0x00ff0000) >> 16) == (mr & 0xff) &&
74 ((mr & 0xff000000) >> 24) == (mr & 0xff))
80 static inline void set_mr(u32 base, u32 cs, u32 mr_addr, u32 mr_val)
82 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
84 mr_addr |= cs << EMIF_REG_CS_SHIFT;
85 writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
86 writel(mr_val, &emif->emif_lpddr2_mode_reg_data);
89 void emif_reset_phy(u32 base)
91 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
94 iodft = readl(&emif->emif_iodft_tlgc);
95 iodft |= EMIF_REG_RESET_PHY_MASK;
96 writel(iodft, &emif->emif_iodft_tlgc);
99 static void do_lpddr2_init(u32 base, u32 cs)
102 const struct lpddr2_mr_regs *mr_regs;
104 get_lpddr2_mr_regs(&mr_regs);
105 /* Wait till device auto initialization is complete */
106 while (get_mr(base, cs, LPDDR2_MR0) & LPDDR2_MR0_DAI_MASK)
108 set_mr(base, cs, LPDDR2_MR10, mr_regs->mr10);
111 * Enough loops assuming a maximum of 2GHz
116 set_mr(base, cs, LPDDR2_MR1, mr_regs->mr1);
117 set_mr(base, cs, LPDDR2_MR16, mr_regs->mr16);
120 * Enable refresh along with writing MR2
121 * Encoding of RL in MR2 is (RL - 2)
123 mr_addr = LPDDR2_MR2 | EMIF_REG_REFRESH_EN_MASK;
124 set_mr(base, cs, mr_addr, mr_regs->mr2);
126 if (mr_regs->mr3 > 0)
127 set_mr(base, cs, LPDDR2_MR3, mr_regs->mr3);
130 static void lpddr2_init(u32 base, const struct emif_regs *regs)
132 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
135 clrbits_le32(&emif->emif_lpddr2_nvm_config, EMIF_REG_CS1NVMEN_MASK);
138 * Keep REG_INITREF_DIS = 1 to prevent re-initialization of SDRAM
139 * when EMIF_SDRAM_CONFIG register is written
141 setbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
144 * Set the SDRAM_CONFIG and PHY_CTRL for the
145 * un-locked frequency & default RL
147 writel(regs->sdram_config_init, &emif->emif_sdram_config);
148 writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
150 do_ext_phy_settings(base, regs);
152 do_lpddr2_init(base, CS0);
153 if (regs->sdram_config & EMIF_REG_EBANK_MASK)
154 do_lpddr2_init(base, CS1);
156 writel(regs->sdram_config, &emif->emif_sdram_config);
157 writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
159 /* Enable refresh now */
160 clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
164 __weak void do_ext_phy_settings(u32 base, const struct emif_regs *regs)
168 void emif_update_timings(u32 base, const struct emif_regs *regs)
170 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
173 writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl_shdw);
175 writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl_shdw);
177 writel(regs->sdram_tim1, &emif->emif_sdram_tim_1_shdw);
178 writel(regs->sdram_tim2, &emif->emif_sdram_tim_2_shdw);
179 writel(regs->sdram_tim3, &emif->emif_sdram_tim_3_shdw);
180 if (omap_revision() == OMAP4430_ES1_0) {
/* ES1 bug: EMIF should be in force idle during freq_update */
182 writel(0, &emif->emif_pwr_mgmt_ctrl);
184 writel(EMIF_PWR_MGMT_CTRL, &emif->emif_pwr_mgmt_ctrl);
185 writel(EMIF_PWR_MGMT_CTRL_SHDW, &emif->emif_pwr_mgmt_ctrl_shdw);
187 writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl_shdw);
188 writel(regs->zq_config, &emif->emif_zq_config);
189 writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
190 writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
192 if ((omap_revision() >= OMAP5430_ES1_0) || is_dra7xx()) {
193 writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0,
194 &emif->emif_l3_config);
195 } else if (omap_revision() >= OMAP4460_ES1_0) {
196 writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_3_LL_0,
197 &emif->emif_l3_config);
199 writel(EMIF_L3_CONFIG_VAL_SYS_10_LL_0,
200 &emif->emif_l3_config);
204 #ifndef CONFIG_OMAP44XX
205 static void omap5_ddr3_leveling(u32 base, const struct emif_regs *regs)
207 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
209 /* keep sdram in self-refresh */
210 writel(((LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT)
211 & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);
215 * Set invert_clkout (if activated)--DDR_PHYCTRL_1
216 * Invert clock adds an additional half cycle delay on the
217 * command interface. The additional half cycle, is usually
218 * meant to enable leveling in the situation that DQS is later
* than CK on the board. It also helps provide some additional
220 * margin for leveling.
222 writel(regs->emif_ddr_phy_ctlr_1,
223 &emif->emif_ddr_phy_ctrl_1);
225 writel(regs->emif_ddr_phy_ctlr_1,
226 &emif->emif_ddr_phy_ctrl_1_shdw);
229 writel(((LP_MODE_DISABLE << EMIF_REG_LP_MODE_SHIFT)
230 & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);
232 /* Launch Full leveling */
233 writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);
235 /* Wait till full leveling is complete */
236 readl(&emif->emif_rd_wr_lvl_ctl);
/* Configure the number of read data eye leveling samples */
240 config_data_eye_leveling_samples(base);
* Launch 8 incremental WR_LVL to compensate for PHY limitation
246 writel(0x2 << EMIF_REG_WRLVLINC_INT_SHIFT,
247 &emif->emif_rd_wr_lvl_ctl);
251 /* Launch Incremental leveling */
252 writel(DDR3_INC_LVL, &emif->emif_rd_wr_lvl_ctl);
256 static void update_hwleveling_output(u32 base, const struct emif_regs *regs)
258 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
259 u32 *emif_ext_phy_ctrl_reg, *emif_phy_status;
262 emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[6];
263 phy = readl(&emif->emif_ddr_phy_ctrl_1);
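/*
 * Each loop below copies one hardware-leveling result from the PHY
 * status registers into the matching EXT_PHY_CTRL register. The value
 * is written twice because the normal and _SHDW copies of each
 * EXT_PHY_CTRL register are laid out as consecutive words.
 */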
265 /* Update PHY_REG_RDDQS_RATIO */
266 emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_7;
267 if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVL_MASK_MASK))
268 for (i = 0; i < PHY_RDDQS_RATIO_REGS; i++) {
269 reg = readl(emif_phy_status++);
270 writel(reg, emif_ext_phy_ctrl_reg++);
271 writel(reg, emif_ext_phy_ctrl_reg++);
274 /* Update PHY_REG_FIFO_WE_SLAVE_RATIO */
275 emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_2;
276 emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[11];
277 if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVLGATE_MASK_MASK))
278 for (i = 0; i < PHY_FIFO_WE_SLAVE_RATIO_REGS; i++) {
279 reg = readl(emif_phy_status++);
280 writel(reg, emif_ext_phy_ctrl_reg++);
281 writel(reg, emif_ext_phy_ctrl_reg++);
284 /* Update PHY_REG_WR_DQ/DQS_SLAVE_RATIO */
285 emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_12;
286 emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[16];
287 if (!(phy & EMIF_DDR_PHY_CTRL_1_WRLVL_MASK_MASK))
288 for (i = 0; i < PHY_REG_WR_DQ_SLAVE_RATIO_REGS; i++) {
289 reg = readl(emif_phy_status++);
290 writel(reg, emif_ext_phy_ctrl_reg++);
291 writel(reg, emif_ext_phy_ctrl_reg++);
294 /* Disable Leveling */
295 writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
296 writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
297 writel(0x0, &emif->emif_rd_wr_lvl_rmp_ctl);
300 static void dra7_ddr3_leveling(u32 base, const struct emif_regs *regs)
302 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
304 /* Clear Error Status */
305 clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36,
306 EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
307 EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);
309 clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36_shdw,
310 EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
311 EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);
/* Disable refreshes before leveling */
314 clrsetbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK,
315 EMIF_REG_INITREF_DIS_MASK);
317 /* Start Full leveling */
318 writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);
322 /* Check for leveling timeout */
323 if (readl(&emif->emif_status) & EMIF_REG_LEVELING_TO_MASK) {
324 printf("Leveling timeout on EMIF%d\n", emif_num(base));
328 /* Enable refreshes after leveling */
329 clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
331 debug("HW leveling success\n");
333 * Update slave ratios in EXT_PHY_CTRLx registers
334 * as per HW leveling output
336 update_hwleveling_output(base, regs);
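/*
 * Zero-fill a DDR region so the ECC check bits are initialised
 * consistently with the data. EDMA3 is used for speed when available,
 * otherwise the region is cleared with a plain memset().
 */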
339 static void dra7_reset_ddr_data(u32 base, u32 size)
341 #if defined(CONFIG_TI_EDMA3) && !defined(CONFIG_DMA)
342 enable_edma3_clocks();
344 edma3_fill(EDMA3_BASE, 1, (void *)base, 0, size);
346 disable_edma3_clocks();
348 memset((void *)base, 0, size);
352 static void dra7_enable_ecc(u32 base, const struct emif_regs *regs)
354 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
355 u32 rgn, rgn_start, size, ctrl_reg;
357 /* ECC available only on dra76x EMIF1 */
358 if ((base != EMIF1_BASE) || !is_dra76x())
361 if (regs->emif_ecc_ctrl_reg & EMIF_ECC_CTRL_REG_ECC_EN_MASK) {
362 /* Disable high-order interleaving */
363 clrbits_le32(MA_PRIORITY, MA_HIMEM_INTERLEAVE_UN_MASK);
366 /* Clear the status flags and other history */
367 writel(readl(&emif->emif_1b_ecc_err_cnt),
368 &emif->emif_1b_ecc_err_cnt);
369 writel(0xffffffff, &emif->emif_1b_ecc_err_dist_1);
370 writel(0x2, &emif->emif_1b_ecc_err_addr_log);
371 writel(0x1, &emif->emif_2b_ecc_err_addr_log);
372 writel(EMIF_INT_WR_ECC_ERR_SYS_MASK |
373 EMIF_INT_TWOBIT_ECC_ERR_SYS_MASK |
374 EMIF_INT_ONEBIT_ECC_ERR_SYS_MASK,
375 &emif->emif_irqstatus_sys);
377 writel(regs->emif_ecc_address_range_1,
378 &emif->emif_ecc_address_range_1);
379 writel(regs->emif_ecc_address_range_2,
380 &emif->emif_ecc_address_range_2);
382 /* Disable RMW and ECC verification for read accesses */
383 ctrl_reg = (regs->emif_ecc_ctrl_reg &
384 ~EMIF_ECC_REG_RMW_EN_MASK) |
385 EMIF_ECC_CTRL_REG_ECC_VERIFY_DIS_MASK;
386 writel(ctrl_reg, &emif->emif_ecc_ctrl_reg);
/* Fill ECC region 1 memory with zeros */
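/*
 * The ECC address-range register holds the region start and end in
 * 64 KiB granules, and the end address is inclusive - hence the
 * "+ 0x10000" in the size calculation below.
 */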
389 rgn_start = (regs->emif_ecc_address_range_1 &
390 EMIF_ECC_REG_ECC_START_ADDR_MASK) << 16;
391 rgn = rgn_start + CONFIG_SYS_SDRAM_BASE;
392 size = (regs->emif_ecc_address_range_1 &
393 EMIF_ECC_REG_ECC_END_ADDR_MASK) + 0x10000 - rgn_start;
395 if (regs->emif_ecc_ctrl_reg &
396 EMIF_ECC_REG_ECC_ADDR_RGN_1_EN_MASK)
397 dra7_reset_ddr_data(rgn, size);
/* Fill ECC region 2 memory with zeros */
400 rgn_start = (regs->emif_ecc_address_range_2 &
401 EMIF_ECC_REG_ECC_START_ADDR_MASK) << 16;
402 rgn = rgn_start + CONFIG_SYS_SDRAM_BASE;
403 size = (regs->emif_ecc_address_range_2 &
404 EMIF_ECC_REG_ECC_END_ADDR_MASK) + 0x10000 - rgn_start;
406 if (regs->emif_ecc_ctrl_reg &
407 EMIF_ECC_REG_ECC_ADDR_RGN_2_EN_MASK)
408 dra7_reset_ddr_data(rgn, size);
410 /* Default value enables RMW and ECC verification */
411 writel(regs->emif_ecc_ctrl_reg, &emif->emif_ecc_ctrl_reg);
415 static void dra7_ddr3_init(u32 base, const struct emif_regs *regs)
417 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
420 emif_reset_phy(base);
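/* Keep EMIF power management (LP mode) disabled during initialization */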
421 writel(0x0, &emif->emif_pwr_mgmt_ctrl);
423 do_ext_phy_settings(base, regs);
425 writel(regs->ref_ctrl | EMIF_REG_INITREF_DIS_MASK,
426 &emif->emif_sdram_ref_ctrl);
427 /* Update timing registers */
428 writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
429 writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
430 writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);
432 writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0, &emif->emif_l3_config);
433 writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);
434 writel(regs->zq_config, &emif->emif_zq_config);
435 writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
436 writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
437 writel(regs->emif_rd_wr_lvl_ctl, &emif->emif_rd_wr_lvl_ctl);
439 writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
440 writel(regs->emif_rd_wr_exec_thresh, &emif->emif_rd_wr_exec_thresh);
442 writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);
444 writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
445 writel(regs->sdram_config_init, &emif->emif_sdram_config);
449 writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl);
451 if (regs->emif_rd_wr_lvl_rmp_ctl & EMIF_REG_RDWRLVL_EN_MASK) {
453 * Perform Dummy ECC setup just to allow hardware
454 * leveling of ECC memories
456 if (is_dra76x() && (base == EMIF1_BASE) &&
457 (regs->emif_ecc_ctrl_reg & EMIF_ECC_CTRL_REG_ECC_EN_MASK)) {
458 writel(0, &emif->emif_ecc_address_range_1);
459 writel(0, &emif->emif_ecc_address_range_2);
460 writel(EMIF_ECC_CTRL_REG_ECC_EN_MASK |
461 EMIF_ECC_CTRL_REG_ECC_ADDR_RGN_PROT_MASK,
462 &emif->emif_ecc_ctrl_reg);
465 dra7_ddr3_leveling(base, regs);
469 writel(0, &emif->emif_ecc_ctrl_reg);
472 /* Enable ECC as necessary */
473 dra7_enable_ecc(base, regs);
476 static void omap5_ddr3_init(u32 base, const struct emif_regs *regs)
478 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
480 writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);
481 writel(regs->sdram_config_init, &emif->emif_sdram_config);
* Set SDRAM_CONFIG and PHY control registers to the locked frequency
* and RL = 7. As the default values of the mode registers are not
* defined, the contents of the mode registers must be fully initialized.
* H/W takes care of this initialization
488 writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
490 /* Update timing registers */
491 writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
492 writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
493 writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);
495 writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);
497 writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
498 writel(regs->sdram_config_init, &emif->emif_sdram_config);
499 do_ext_phy_settings(base, regs);
501 writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
502 omap5_ddr3_leveling(base, regs);
505 static void ddr3_init(u32 base, const struct emif_regs *regs)
508 omap5_ddr3_init(base, regs);
510 dra7_ddr3_init(base, regs);
514 #ifndef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
515 #define print_timing_reg(reg) debug(#reg" - 0x%08x\n", (reg))
518 * Organization and refresh requirements for LPDDR2 devices of different
519 * types and densities. Derived from JESD209-2 section 2.4
521 const struct lpddr2_addressing addressing_table[] = {
522 /* Banks tREFIx10 rowx32,rowx16 colx32,colx16 density */
523 {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_7, COL_8} },/*64M */
524 {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_8, COL_9} },/*128M */
525 {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_8, COL_9} },/*256M */
526 {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*512M */
527 {BANKS8, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*1GS4 */
528 {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_9, COL_10} },/*2GS4 */
529 {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_10, COL_11} },/*4G */
530 {BANKS8, T_REFI_3_9, {ROW_15, ROW_15}, {COL_10, COL_11} },/*8G */
531 {BANKS4, T_REFI_7_8, {ROW_14, ROW_14}, {COL_9, COL_10} },/*1GS2 */
532 {BANKS4, T_REFI_3_9, {ROW_15, ROW_15}, {COL_9, COL_10} },/*2GS2 */
535 static const u32 lpddr2_density_2_size_in_mbytes[] = {
549 * Calculate the period of DDR clock from frequency value and set the
550 * denominator and numerator in global variables for easy access later
552 static void set_ddr_clk_period(u32 freq)
556 * period_in_ns = 10^9/freq
560 cancel_out(T_num, T_den, 200);
* Convert time in nanoseconds to the number of DDR clock cycles
567 static inline u32 ns_2_cycles(u32 ns)
569 return ((ns * (*T_den)) + (*T_num) - 1) / (*T_num);
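/*
 * The DDR clock period is T_num/T_den ns. Illustrative example, assuming
 * a 400 MHz DDR clock (2.5 ns period): ns_2_cycles(15) rounds 15 / 2.5
 * up and returns 6.
 */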
* Same as ns_2_cycles, except that the time passed in is twice the actual
* value (to avoid fractions). The number of cycles returned is for the
* original value of the timing parameter
577 static inline u32 ns_x2_2_cycles(u32 ns)
579 return ((ns * (*T_den)) + (*T_num) * 2 - 1) / ((*T_num) * 2);
* Find addressing table index based on the device's type (S2 or S4) and
586 s8 addressing_table_index(u8 type, u8 density, u8 width)
589 if ((density > LPDDR2_DENSITY_8Gb) || (width == LPDDR2_IO_WIDTH_8))
593 * Look at the way ADDR_TABLE_INDEX* values have been defined
594 * in emif.h compared to LPDDR2_DENSITY_* values
* The table is laid out in increasing order of density
596 * (ignoring type). The exceptions 1GS2 and 2GS2 have been placed
599 if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_1Gb))
600 index = ADDR_TABLE_INDEX1GS2;
601 else if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_2Gb))
602 index = ADDR_TABLE_INDEX2GS2;
606 debug("emif: addressing table index %d\n", index);
* Find the right timing table from the array of timing
613 * tables of the device using DDR clock frequency
615 static const struct lpddr2_ac_timings *get_timings_table(const struct
616 lpddr2_ac_timings *const *device_timings,
619 u32 i, temp, freq_nearest;
620 const struct lpddr2_ac_timings *timings = 0;
622 emif_assert(freq <= MAX_LPDDR2_FREQ);
623 emif_assert(device_timings);
626 * Start with the maximum allowed frequency - that is always safe
628 freq_nearest = MAX_LPDDR2_FREQ;
630 * Find the timings table that has the max frequency value:
631 * i. Above or equal to the DDR frequency - safe
632 * ii. The lowest that satisfies condition (i) - optimal
634 for (i = 0; (i < MAX_NUM_SPEEDBINS) && device_timings[i]; i++) {
635 temp = device_timings[i]->max_freq;
636 if ((temp >= freq) && (temp <= freq_nearest)) {
638 timings = device_timings[i];
641 debug("emif: timings table: %d\n", freq_nearest);
646 * Finds the value of emif_sdram_config_reg
647 * All parameters are programmed based on the device on CS0.
* If there is a device on CS1, it will be the same as that on CS0 or
649 * it will be NVM. We don't support NVM yet.
650 * If cs1_device pointer is NULL it is assumed that there is no device
653 static u32 get_sdram_config_reg(const struct lpddr2_device_details *cs0_device,
654 const struct lpddr2_device_details *cs1_device,
655 const struct lpddr2_addressing *addressing,
660 config_reg |= (cs0_device->type + 4) << EMIF_REG_SDRAM_TYPE_SHIFT;
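/*
 * The "+ 4" above offsets the MR8 type encoding (S4 = 0, S2 = 1) to the
 * EMIF SDRAM_TYPE field encoding, where LPDDR2-S4 is 4 and LPDDR2-S2 is 5.
 */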
661 config_reg |= EMIF_INTERLEAVING_POLICY_MAX_INTERLEAVING <<
662 EMIF_REG_IBANK_POS_SHIFT;
664 config_reg |= cs0_device->io_width << EMIF_REG_NARROW_MODE_SHIFT;
666 config_reg |= RL << EMIF_REG_CL_SHIFT;
668 config_reg |= addressing->row_sz[cs0_device->io_width] <<
669 EMIF_REG_ROWSIZE_SHIFT;
671 config_reg |= addressing->num_banks << EMIF_REG_IBANK_SHIFT;
673 config_reg |= (cs1_device ? EBANK_CS1_EN : EBANK_CS1_DIS) <<
674 EMIF_REG_EBANK_SHIFT;
676 config_reg |= addressing->col_sz[cs0_device->io_width] <<
677 EMIF_REG_PAGESIZE_SHIFT;
682 static u32 get_sdram_ref_ctrl(u32 freq,
683 const struct lpddr2_addressing *addressing)
685 u32 ref_ctrl = 0, val = 0, freq_khz;
686 freq_khz = freq / 1000;
* The refresh rate to be set is tREFI (in us) * freq (in MHz);
* the division by 10000 accounts for the kHz units and the x10 in t_REFI_us_x10
691 val = addressing->t_REFI_us_x10 * freq_khz / 10000;
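/*
 * Illustrative example: t_REFI_us_x10 = 78 (tREFI = 7.8 us) at 400 MHz
 * gives 78 * 400000 / 10000 = 3120 DDR cycles between refreshes.
 */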
692 ref_ctrl |= val << EMIF_REG_REFRESH_RATE_SHIFT;
697 static u32 get_sdram_tim_1_reg(const struct lpddr2_ac_timings *timings,
698 const struct lpddr2_min_tck *min_tck,
699 const struct lpddr2_addressing *addressing)
701 u32 tim1 = 0, val = 0;
702 val = max(min_tck->tWTR, ns_x2_2_cycles(timings->tWTRx2)) - 1;
703 tim1 |= val << EMIF_REG_T_WTR_SHIFT;
705 if (addressing->num_banks == BANKS8)
706 val = (timings->tFAW * (*T_den) + 4 * (*T_num) - 1) /
709 val = max(min_tck->tRRD, ns_2_cycles(timings->tRRD)) - 1;
711 tim1 |= val << EMIF_REG_T_RRD_SHIFT;
713 val = ns_2_cycles(timings->tRASmin + timings->tRPab) - 1;
714 tim1 |= val << EMIF_REG_T_RC_SHIFT;
716 val = max(min_tck->tRAS_MIN, ns_2_cycles(timings->tRASmin)) - 1;
717 tim1 |= val << EMIF_REG_T_RAS_SHIFT;
719 val = max(min_tck->tWR, ns_2_cycles(timings->tWR)) - 1;
720 tim1 |= val << EMIF_REG_T_WR_SHIFT;
722 val = max(min_tck->tRCD, ns_2_cycles(timings->tRCD)) - 1;
723 tim1 |= val << EMIF_REG_T_RCD_SHIFT;
725 val = max(min_tck->tRP_AB, ns_2_cycles(timings->tRPab)) - 1;
726 tim1 |= val << EMIF_REG_T_RP_SHIFT;
731 static u32 get_sdram_tim_2_reg(const struct lpddr2_ac_timings *timings,
732 const struct lpddr2_min_tck *min_tck)
734 u32 tim2 = 0, val = 0;
735 val = max(min_tck->tCKE, timings->tCKE) - 1;
736 tim2 |= val << EMIF_REG_T_CKE_SHIFT;
738 val = max(min_tck->tRTP, ns_x2_2_cycles(timings->tRTPx2)) - 1;
739 tim2 |= val << EMIF_REG_T_RTP_SHIFT;
742 * tXSRD = tRFCab + 10 ns. XSRD and XSNR should have the
745 val = ns_2_cycles(timings->tXSR) - 1;
746 tim2 |= val << EMIF_REG_T_XSRD_SHIFT;
747 tim2 |= val << EMIF_REG_T_XSNR_SHIFT;
749 val = max(min_tck->tXP, ns_x2_2_cycles(timings->tXPx2)) - 1;
750 tim2 |= val << EMIF_REG_T_XP_SHIFT;
755 static u32 get_sdram_tim_3_reg(const struct lpddr2_ac_timings *timings,
756 const struct lpddr2_min_tck *min_tck,
757 const struct lpddr2_addressing *addressing)
759 u32 tim3 = 0, val = 0;
760 val = min(timings->tRASmax * 10 / addressing->t_REFI_us_x10 - 1, 0xF);
761 tim3 |= val << EMIF_REG_T_RAS_MAX_SHIFT;
763 val = ns_2_cycles(timings->tRFCab) - 1;
764 tim3 |= val << EMIF_REG_T_RFC_SHIFT;
766 val = ns_x2_2_cycles(timings->tDQSCKMAXx2) - 1;
767 tim3 |= val << EMIF_REG_T_TDQSCKMAX_SHIFT;
769 val = ns_2_cycles(timings->tZQCS) - 1;
770 tim3 |= val << EMIF_REG_ZQ_ZQCS_SHIFT;
772 val = max(min_tck->tCKESR, ns_2_cycles(timings->tCKESR)) - 1;
773 tim3 |= val << EMIF_REG_T_CKESR_SHIFT;
778 static u32 get_zq_config_reg(const struct lpddr2_device_details *cs1_device,
779 const struct lpddr2_addressing *addressing,
785 EMIF_ZQCS_INTERVAL_DVFS_IN_US * 10 /
786 addressing->t_REFI_us_x10;
789 EMIF_ZQCS_INTERVAL_NORMAL_IN_US * 10 /
790 addressing->t_REFI_us_x10;
791 zq |= val << EMIF_REG_ZQ_REFINTERVAL_SHIFT;
793 zq |= (REG_ZQ_ZQCL_MULT - 1) << EMIF_REG_ZQ_ZQCL_MULT_SHIFT;
795 zq |= (REG_ZQ_ZQINIT_MULT - 1) << EMIF_REG_ZQ_ZQINIT_MULT_SHIFT;
797 zq |= REG_ZQ_SFEXITEN_ENABLE << EMIF_REG_ZQ_SFEXITEN_SHIFT;
* Assume that the two chip selects share a single calibration resistor.
* If there are indeed two calibration resistors, this flag should be
* enabled to take advantage of the dual calibration feature.
* This data should ideally come from board files, but considering that
* none of the boards today have calibration resistors per CS,
* it would be an unnecessary overhead.
807 zq |= REG_ZQ_DUALCALEN_DISABLE << EMIF_REG_ZQ_DUALCALEN_SHIFT;
809 zq |= REG_ZQ_CS0EN_ENABLE << EMIF_REG_ZQ_CS0EN_SHIFT;
811 zq |= (cs1_device ? 1 : 0) << EMIF_REG_ZQ_CS1EN_SHIFT;
816 static u32 get_temp_alert_config(const struct lpddr2_device_details *cs1_device,
817 const struct lpddr2_addressing *addressing,
820 u32 alert = 0, interval;
822 TEMP_ALERT_POLL_INTERVAL_MS * 10000 / addressing->t_REFI_us_x10;
825 alert |= interval << EMIF_REG_TA_REFINTERVAL_SHIFT;
827 alert |= TEMP_ALERT_CONFIG_DEVCT_1 << EMIF_REG_TA_DEVCNT_SHIFT;
829 alert |= TEMP_ALERT_CONFIG_DEVWDT_32 << EMIF_REG_TA_DEVWDT_SHIFT;
831 alert |= 1 << EMIF_REG_TA_SFEXITEN_SHIFT;
833 alert |= 1 << EMIF_REG_TA_CS0EN_SHIFT;
835 alert |= (cs1_device ? 1 : 0) << EMIF_REG_TA_CS1EN_SHIFT;
840 static u32 get_read_idle_ctrl_reg(u8 volt_ramp)
842 u32 idle = 0, val = 0;
844 val = ns_2_cycles(READ_IDLE_INTERVAL_DVFS) / 64 - 1;
/* Maximum value in normal conditions - suggested by HW team */
848 idle |= val << EMIF_REG_READ_IDLE_INTERVAL_SHIFT;
850 idle |= EMIF_REG_READ_IDLE_LEN_VAL << EMIF_REG_READ_IDLE_LEN_SHIFT;
855 static u32 get_ddr_phy_ctrl_1(u32 freq, u8 RL)
857 u32 phy = 0, val = 0;
859 phy |= (RL + 2) << EMIF_REG_READ_LATENCY_SHIFT;
861 if (freq <= 100000000)
862 val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS;
863 else if (freq <= 200000000)
864 val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ;
866 val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ;
867 phy |= val << EMIF_REG_DLL_SLAVE_DLY_CTRL_SHIFT;
869 /* Other fields are constant magic values. Hardcode them together */
870 phy |= EMIF_DDR_PHY_CTRL_1_BASE_VAL <<
871 EMIF_EMIF_DDR_PHY_CTRL_1_BASE_VAL_SHIFT;
876 static u32 get_emif_mem_size(u32 base)
878 u32 size_mbytes = 0, temp;
879 struct emif_device_details dev_details;
880 struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
881 u32 emif_nr = emif_num(base);
883 emif_reset_phy(base);
884 dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
886 dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
888 emif_reset_phy(base);
890 if (dev_details.cs0_device_details) {
891 temp = dev_details.cs0_device_details->density;
892 size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
895 if (dev_details.cs1_device_details) {
896 temp = dev_details.cs1_device_details->density;
897 size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
899 /* convert to bytes */
900 return size_mbytes << 20;
903 /* Gets the encoding corresponding to a given DMM section size */
904 u32 get_dmm_section_size_map(u32 section_size)
907 * Section size mapping:
908 * 0x0: 16-MiB section
909 * 0x1: 32-MiB section
910 * 0x2: 64-MiB section
911 * 0x3: 128-MiB section
912 * 0x4: 256-MiB section
913 * 0x5: 512-MiB section
section_size >>= 24; /* divide by 16 MiB */
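/* e.g. a 512 MiB section: 0x20000000 >> 24 = 32, log2(32) = 5 -> 0x5 above */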
918 return log_2_n_round_down(section_size);
921 static void emif_calculate_regs(
922 const struct emif_device_details *emif_dev_details,
923 u32 freq, struct emif_regs *regs)
926 const struct lpddr2_addressing *addressing;
927 const struct lpddr2_ac_timings *timings;
928 const struct lpddr2_min_tck *min_tck;
929 const struct lpddr2_device_details *cs0_dev_details =
930 emif_dev_details->cs0_device_details;
931 const struct lpddr2_device_details *cs1_dev_details =
932 emif_dev_details->cs1_device_details;
933 const struct lpddr2_device_timings *cs0_dev_timings =
934 emif_dev_details->cs0_device_timings;
936 emif_assert(emif_dev_details);
* You cannot have a device on CS1 without one on CS0,
* so configuring the EMIF without a device on CS0 doesn't make sense
943 emif_assert(cs0_dev_details);
944 emif_assert(cs0_dev_details->type != LPDDR2_TYPE_NVM);
* If there is a device on CS1 it should be the same type as CS0
947 * (or NVM. But NVM is not supported in this driver yet)
949 emif_assert((cs1_dev_details == NULL) ||
950 (cs1_dev_details->type == LPDDR2_TYPE_NVM) ||
951 (cs0_dev_details->type == cs1_dev_details->type));
952 emif_assert(freq <= MAX_LPDDR2_FREQ);
954 set_ddr_clk_period(freq);
* The device on CS0 is used for all timing calculations.
* There is only one set of timing registers per EMIF, so if the
* second CS (CS1) has a device, it must have the same timings as the
* device on CS0
962 timings = get_timings_table(cs0_dev_timings->ac_timings, freq);
963 emif_assert(timings);
964 min_tck = cs0_dev_timings->min_tck;
966 temp = addressing_table_index(cs0_dev_details->type,
967 cs0_dev_details->density,
968 cs0_dev_details->io_width);
970 emif_assert((temp >= 0));
971 addressing = &(addressing_table[temp]);
972 emif_assert(addressing);
974 sys_freq = get_sys_clk_freq();
976 regs->sdram_config_init = get_sdram_config_reg(cs0_dev_details,
978 addressing, RL_BOOT);
980 regs->sdram_config = get_sdram_config_reg(cs0_dev_details,
982 addressing, RL_FINAL);
984 regs->ref_ctrl = get_sdram_ref_ctrl(freq, addressing);
986 regs->sdram_tim1 = get_sdram_tim_1_reg(timings, min_tck, addressing);
988 regs->sdram_tim2 = get_sdram_tim_2_reg(timings, min_tck);
990 regs->sdram_tim3 = get_sdram_tim_3_reg(timings, min_tck, addressing);
992 regs->read_idle_ctrl = get_read_idle_ctrl_reg(LPDDR2_VOLTAGE_STABLE);
994 regs->temp_alert_config =
995 get_temp_alert_config(cs1_dev_details, addressing, 0);
997 regs->zq_config = get_zq_config_reg(cs1_dev_details, addressing,
998 LPDDR2_VOLTAGE_STABLE);
1000 regs->emif_ddr_phy_ctlr_1_init =
1001 get_ddr_phy_ctrl_1(sys_freq / 2, RL_BOOT);
1003 regs->emif_ddr_phy_ctlr_1 =
1004 get_ddr_phy_ctrl_1(freq, RL_FINAL);
1008 print_timing_reg(regs->sdram_config_init);
1009 print_timing_reg(regs->sdram_config);
1010 print_timing_reg(regs->ref_ctrl);
1011 print_timing_reg(regs->sdram_tim1);
1012 print_timing_reg(regs->sdram_tim2);
1013 print_timing_reg(regs->sdram_tim3);
1014 print_timing_reg(regs->read_idle_ctrl);
1015 print_timing_reg(regs->temp_alert_config);
1016 print_timing_reg(regs->zq_config);
1017 print_timing_reg(regs->emif_ddr_phy_ctlr_1);
1018 print_timing_reg(regs->emif_ddr_phy_ctlr_1_init);
1020 #endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */
1022 #ifdef CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION
1023 const char *get_lpddr2_type(u8 type_id)
1026 case LPDDR2_TYPE_S4:
1028 case LPDDR2_TYPE_S2:
1035 const char *get_lpddr2_io_width(u8 width_id)
1038 case LPDDR2_IO_WIDTH_8:
1040 case LPDDR2_IO_WIDTH_16:
1042 case LPDDR2_IO_WIDTH_32:
1049 const char *get_lpddr2_manufacturer(u32 manufacturer)
1051 switch (manufacturer) {
1052 case LPDDR2_MANUFACTURER_SAMSUNG:
1054 case LPDDR2_MANUFACTURER_QIMONDA:
1056 case LPDDR2_MANUFACTURER_ELPIDA:
1058 case LPDDR2_MANUFACTURER_ETRON:
1060 case LPDDR2_MANUFACTURER_NANYA:
1062 case LPDDR2_MANUFACTURER_HYNIX:
1064 case LPDDR2_MANUFACTURER_MOSEL:
1066 case LPDDR2_MANUFACTURER_WINBOND:
1068 case LPDDR2_MANUFACTURER_ESMT:
1070 case LPDDR2_MANUFACTURER_SPANSION:
1072 case LPDDR2_MANUFACTURER_SST:
1074 case LPDDR2_MANUFACTURER_ZMOS:
1076 case LPDDR2_MANUFACTURER_INTEL:
1078 case LPDDR2_MANUFACTURER_NUMONYX:
1080 case LPDDR2_MANUFACTURER_MICRON:
1087 static void display_sdram_details(u32 emif_nr, u32 cs,
1088 struct lpddr2_device_details *device)
1090 const char *mfg_str;
1091 const char *type_str;
1092 char density_str[10];
1095 debug("EMIF%d CS%d\t", emif_nr, cs);
1102 mfg_str = get_lpddr2_manufacturer(device->manufacturer);
1103 type_str = get_lpddr2_type(device->type);
1105 density = lpddr2_density_2_size_in_mbytes[device->density];
if ((density / 1024 * 1024) == density) {
density /= 1024;
1108 sprintf(density_str, "%d GB", density);
1110 sprintf(density_str, "%d MB", density);
1111 if (mfg_str && type_str)
1112 debug("%s\t\t%s\t%s\n", mfg_str, type_str, density_str);
1115 static u8 is_lpddr2_sdram_present(u32 base, u32 cs,
1116 struct lpddr2_device_details *lpddr2_device)
1120 mr = get_mr(base, cs, LPDDR2_MR0);
1122 /* Mode register value bigger than 8 bit */
1126 temp = (mr & LPDDR2_MR0_DI_MASK) >> LPDDR2_MR0_DI_SHIFT;
1131 temp = (mr & LPDDR2_MR0_DNVI_MASK) >> LPDDR2_MR0_DNVI_SHIFT;
1134 /* DNV supported - But DNV is only supported for NVM */
1138 mr = get_mr(base, cs, LPDDR2_MR4);
1140 /* Mode register value bigger than 8 bit */
1144 mr = get_mr(base, cs, LPDDR2_MR5);
1146 /* Mode register value bigger than 8 bit */
1150 if (!get_lpddr2_manufacturer(mr)) {
1151 /* Manufacturer not identified */
1154 lpddr2_device->manufacturer = mr;
1156 mr = get_mr(base, cs, LPDDR2_MR6);
1158 /* Mode register value bigger than 8 bit */
1162 mr = get_mr(base, cs, LPDDR2_MR7);
1164 /* Mode register value bigger than 8 bit */
1168 mr = get_mr(base, cs, LPDDR2_MR8);
1170 /* Mode register value bigger than 8 bit */
1174 temp = (mr & MR8_TYPE_MASK) >> MR8_TYPE_SHIFT;
1175 if (!get_lpddr2_type(temp)) {
1179 lpddr2_device->type = temp;
1181 temp = (mr & MR8_DENSITY_MASK) >> MR8_DENSITY_SHIFT;
1182 if (temp > LPDDR2_DENSITY_32Gb) {
1183 /* Density not supported */
1186 lpddr2_device->density = temp;
1188 temp = (mr & MR8_IO_WIDTH_MASK) >> MR8_IO_WIDTH_SHIFT;
1189 if (!get_lpddr2_io_width(temp)) {
1190 /* IO width unsupported value */
1193 lpddr2_device->io_width = temp;
1196 * If all the above tests pass we should
1197 * have a device on this chip-select
1202 struct lpddr2_device_details *emif_get_device_details(u32 emif_nr, u8 cs,
1203 struct lpddr2_device_details *lpddr2_dev_details)
1206 u32 base = (emif_nr == 1) ? EMIF1_BASE : EMIF2_BASE;
1208 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
1210 if (!lpddr2_dev_details)
1213 /* Do the minimum init for mode register accesses */
1214 if (!(running_from_sdram() || warm_reset())) {
1215 phy = get_ddr_phy_ctrl_1(get_sys_clk_freq() / 2, RL_BOOT);
1216 writel(phy, &emif->emif_ddr_phy_ctrl_1);
1219 if (!(is_lpddr2_sdram_present(base, cs, lpddr2_dev_details)))
1222 display_sdram_details(emif_num(base), cs, lpddr2_dev_details);
1224 return lpddr2_dev_details;
1226 #endif /* CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION */
1228 static void do_sdram_init(u32 base)
1230 const struct emif_regs *regs;
1231 u32 in_sdram, emif_nr;
1233 debug(">>do_sdram_init() %x\n", base);
1235 in_sdram = running_from_sdram();
1236 emif_nr = (base == EMIF1_BASE) ? 1 : 2;
1238 #ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
emif_get_reg_dump(emif_nr, &regs);
1241 debug("EMIF: reg dump not provided\n");
* The user has not provided the register values. We need to
* calculate them based on the timings and the DDR frequency
1249 struct emif_device_details dev_details;
1250 struct emif_regs calculated_regs;
1253 * Get device details:
1254 * - Discovered if CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION is set
1255 * - Obtained from user otherwise
1257 struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
1258 emif_reset_phy(base);
1259 dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
1261 dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
1263 emif_reset_phy(base);
1265 /* Return if no devices on this EMIF */
1266 if (!dev_details.cs0_device_details &&
1267 !dev_details.cs1_device_details) {
1272 * Get device timings:
1273 * - Default timings specified by JESD209-2 if
1274 * CONFIG_SYS_DEFAULT_LPDDR2_TIMINGS is set
1275 * - Obtained from user otherwise
1277 emif_get_device_timings(emif_nr, &dev_details.cs0_device_timings,
1278 &dev_details.cs1_device_timings);
1280 /* Calculate the register values */
1281 emif_calculate_regs(&dev_details, omap_ddr_clk(), &calculated_regs);
1282 regs = &calculated_regs;
1283 #endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */
* Initializing the DDR device cannot happen from SDRAM.
* Changing the timing registers in the EMIF can happen (going from one
* frequency to another)
1290 if (!in_sdram && (!warm_reset() || is_dra7xx())) {
1291 if (emif_sdram_type(regs->sdram_config) ==
1292 EMIF_SDRAM_TYPE_LPDDR2)
1293 lpddr2_init(base, regs);
1294 #ifndef CONFIG_OMAP44XX
1296 ddr3_init(base, regs);
1299 #ifdef CONFIG_OMAP54XX
1300 if (warm_reset() && (emif_sdram_type(regs->sdram_config) ==
1301 EMIF_SDRAM_TYPE_DDR3) && !is_dra7xx()) {
1302 set_lpmode_selfrefresh(base);
1303 emif_reset_phy(base);
1304 omap5_ddr3_leveling(base, regs);
1308 /* Write to the shadow registers */
1309 emif_update_timings(base, regs);
1311 debug("<<do_sdram_init() %x\n", base);
1314 void emif_post_init_config(u32 base)
1316 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
1317 u32 omap_rev = omap_revision();
1319 /* reset phy on ES2.0 */
1320 if (omap_rev == OMAP4430_ES2_0)
1321 emif_reset_phy(base);
1323 /* Put EMIF back in smart idle on ES1.0 */
1324 if (omap_rev == OMAP4430_ES1_0)
1325 writel(0x80000000, &emif->emif_pwr_mgmt_ctrl);
1328 void dmm_init(u32 base)
1330 const struct dmm_lisa_map_regs *lisa_map_regs;
1331 u32 i, section, valid;
1333 #ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
1334 emif_get_dmm_regs(&lisa_map_regs);
1336 u32 emif1_size, emif2_size, mapped_size, section_map = 0;
1337 u32 section_cnt, sys_addr;
1338 struct dmm_lisa_map_regs lis_map_regs_calculated = {0};
1342 sys_addr = CONFIG_SYS_SDRAM_BASE;
1343 emif1_size = get_emif_mem_size(EMIF1_BASE);
1344 emif2_size = get_emif_mem_size(EMIF2_BASE);
1345 debug("emif1_size 0x%x emif2_size 0x%x\n", emif1_size, emif2_size);
1347 if (!emif1_size && !emif2_size)
1350 /* symmetric interleaved section */
1351 if (emif1_size && emif2_size) {
1352 mapped_size = min(emif1_size, emif2_size);
1353 section_map = DMM_LISA_MAP_INTERLEAVED_BASE_VAL;
1354 section_map |= 0 << EMIF_SDRC_ADDR_SHIFT;
1356 section_map |= (sys_addr >> 24) <<
1357 EMIF_SYS_ADDR_SHIFT;
1358 section_map |= get_dmm_section_size_map(mapped_size * 2)
1359 << EMIF_SYS_SIZE_SHIFT;
1360 lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
1361 emif1_size -= mapped_size;
1362 emif2_size -= mapped_size;
1363 sys_addr += (mapped_size * 2);
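/*
 * e.g. with 512 MiB behind each EMIF, the interleaved section above
 * maps 1 GiB starting at CONFIG_SYS_SDRAM_BASE and both emif*_size
 * drop to zero.
 */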
* Single-EMIF section (we can have a maximum of one single-EMIF
* section - either EMIF1 or EMIF2 or none, but not both)
1372 section_map = DMM_LISA_MAP_EMIF1_ONLY_BASE_VAL;
1373 section_map |= get_dmm_section_size_map(emif1_size)
1374 << EMIF_SYS_SIZE_SHIFT;
1376 section_map |= (mapped_size >> 24) <<
1377 EMIF_SDRC_ADDR_SHIFT;
1379 section_map |= (sys_addr >> 24) << EMIF_SYS_ADDR_SHIFT;
1383 section_map = DMM_LISA_MAP_EMIF2_ONLY_BASE_VAL;
1384 section_map |= get_dmm_section_size_map(emif2_size) <<
1385 EMIF_SYS_SIZE_SHIFT;
1387 section_map |= mapped_size >> 24 << EMIF_SDRC_ADDR_SHIFT;
1389 section_map |= sys_addr >> 24 << EMIF_SYS_ADDR_SHIFT;
1393 if (section_cnt == 2) {
1394 /* Only 1 section - either symmetric or single EMIF */
1395 lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
1396 lis_map_regs_calculated.dmm_lisa_map_2 = 0;
1397 lis_map_regs_calculated.dmm_lisa_map_1 = 0;
1399 /* 2 sections - 1 symmetric, 1 single EMIF */
1400 lis_map_regs_calculated.dmm_lisa_map_2 = section_map;
1401 lis_map_regs_calculated.dmm_lisa_map_1 = 0;
1404 /* TRAP for invalid TILER mappings in section 0 */
1405 lis_map_regs_calculated.dmm_lisa_map_0 = DMM_LISA_MAP_0_INVAL_ADDR_TRAP;
1407 if (omap_revision() >= OMAP4460_ES1_0)
1408 lis_map_regs_calculated.is_ma_present = 1;
1410 lisa_map_regs = &lis_map_regs_calculated;
1412 struct dmm_lisa_map_regs *hw_lisa_map_regs =
1413 (struct dmm_lisa_map_regs *)base;
1415 writel(0, &hw_lisa_map_regs->dmm_lisa_map_3);
1416 writel(0, &hw_lisa_map_regs->dmm_lisa_map_2);
1417 writel(0, &hw_lisa_map_regs->dmm_lisa_map_1);
1418 writel(0, &hw_lisa_map_regs->dmm_lisa_map_0);
1420 writel(lisa_map_regs->dmm_lisa_map_3,
1421 &hw_lisa_map_regs->dmm_lisa_map_3);
1422 writel(lisa_map_regs->dmm_lisa_map_2,
1423 &hw_lisa_map_regs->dmm_lisa_map_2);
1424 writel(lisa_map_regs->dmm_lisa_map_1,
1425 &hw_lisa_map_regs->dmm_lisa_map_1);
1426 writel(lisa_map_regs->dmm_lisa_map_0,
1427 &hw_lisa_map_regs->dmm_lisa_map_0);
1429 if (lisa_map_regs->is_ma_present) {
1431 (struct dmm_lisa_map_regs *)MA_BASE;
1433 writel(lisa_map_regs->dmm_lisa_map_3,
1434 &hw_lisa_map_regs->dmm_lisa_map_3);
1435 writel(lisa_map_regs->dmm_lisa_map_2,
1436 &hw_lisa_map_regs->dmm_lisa_map_2);
1437 writel(lisa_map_regs->dmm_lisa_map_1,
1438 &hw_lisa_map_regs->dmm_lisa_map_1);
1439 writel(lisa_map_regs->dmm_lisa_map_0,
1440 &hw_lisa_map_regs->dmm_lisa_map_0);
1442 setbits_le32(MA_PRIORITY, MA_HIMEM_INTERLEAVE_UN_MASK);
1446 * EMIF should be configured only when
1447 * memory is mapped on it. Using emif1_enabled
1448 * and emif2_enabled variables for this.
1452 for (i = 0; i < 4; i++) {
1453 section = __raw_readl(DMM_BASE + i*4);
1454 valid = (section & EMIF_SDRC_MAP_MASK) >>
1455 (EMIF_SDRC_MAP_SHIFT);
1470 static void do_bug0039_workaround(u32 base)
1472 u32 val, i, clkctrl;
1473 struct emif_reg_struct *emif_base = (struct emif_reg_struct *)base;
1474 const struct read_write_regs *bug_00339_regs;
1476 u32 *phy_status_base = &emif_base->emif_ddr_phy_status[0];
1477 u32 *phy_ctrl_base = &emif_base->emif_ddr_ext_phy_ctrl_1;
1482 bug_00339_regs = get_bug_regs(&iterations);
/* Put EMIF into idle */
1485 clkctrl = __raw_readl((*prcm)->cm_memif_clkstctrl);
1486 __raw_writel(0x0, (*prcm)->cm_memif_clkstctrl);
/* Copy the PHY status registers into the PHY ctrl shadow registers */
1489 for (i = 0; i < iterations; i++) {
1490 val = __raw_readl(phy_status_base +
1491 bug_00339_regs[i].read_reg - 1);
1493 __raw_writel(val, phy_ctrl_base +
1494 ((bug_00339_regs[i].write_reg - 1) << 1));
1496 __raw_writel(val, phy_ctrl_base +
1497 (bug_00339_regs[i].write_reg << 1) - 1);
1500 /* Disable leveling */
1501 writel(0x0, &emif_base->emif_rd_wr_lvl_rmp_ctl);
1503 __raw_writel(clkctrl, (*prcm)->cm_memif_clkstctrl);
1507 * SDRAM initialization:
1508 * SDRAM initialization has two parts:
1509 * 1. Configuring the SDRAM device
* 2. Updating the AC-timing-related parameters in the EMIF module
1511 * (1) should be done only once and should not be done while we are
1512 * running from SDRAM.
1513 * (2) can and should be done more than once if OPP changes.
* Particularly, this may be needed when we boot without SPL and
* use a Configuration Header (CH). ROM code supports only 50% OPP
* at boot (low power boot), so U-Boot has to switch to OPP100 and update
1517 * the frequency. So,
1518 * Doing (1) and (2) makes sense - first time initialization
1519 * Doing (2) and not (1) makes sense - OPP change (when using CH)
* Doing (1) and not (2) doesn't make sense
1521 * See do_sdram_init() for the details
1523 void sdram_init(void)
1525 u32 in_sdram, size_prog, size_detect;
1526 struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;
1527 u32 sdram_type = emif_sdram_type(emif->emif_sdram_config);
1529 debug(">>sdram_init()\n");
1531 if (omap_hw_init_context() == OMAP_INIT_CONTEXT_UBOOT_AFTER_SPL)
1534 in_sdram = running_from_sdram();
1535 debug("in_sdram = %d\n", in_sdram);
1538 if ((sdram_type == EMIF_SDRAM_TYPE_LPDDR2) && !warm_reset())
1539 bypass_dpll((*prcm)->cm_clkmode_dpll_core);
1540 else if (sdram_type == EMIF_SDRAM_TYPE_DDR3)
1541 writel(CM_DLL_CTRL_NO_OVERRIDE, (*prcm)->cm_dll_ctrl);
1548 do_sdram_init(EMIF1_BASE);
1551 do_sdram_init(EMIF2_BASE);
1553 if (!(in_sdram || warm_reset())) {
1555 emif_post_init_config(EMIF1_BASE);
1557 emif_post_init_config(EMIF2_BASE);
1560 /* for the shadow registers to take effect */
1561 if (sdram_type == EMIF_SDRAM_TYPE_LPDDR2)
1564 /* Do some testing after the init */
1566 size_prog = omap_sdram_size();
1567 size_prog = log_2_n_round_down(size_prog);
1568 size_prog = (1 << size_prog);
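/*
 * get_ram_size() can only detect power-of-two sizes, so the programmed
 * size is rounded down to a power of two before the comparison below.
 */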
1570 size_detect = get_ram_size((long *)CONFIG_SYS_SDRAM_BASE,
1572 /* Compare with the size programmed */
1573 if (size_detect != size_prog) {
1574 printf("SDRAM: identified size not same as expected"
1575 " size identified: %x expected: %x\n",
1579 debug("get_ram_size() successful");
1582 #if defined(CONFIG_TI_SECURE_DEVICE)
1584 * On HS devices, do static EMIF firewall configuration
1585 * but only do it if not already running in SDRAM
1588 if (0 != secure_emif_reserve())
1591 /* On HS devices, ensure static EMIF firewall APIs are locked */
1592 if (0 != secure_emif_firewall_lock())
1596 if (sdram_type == EMIF_SDRAM_TYPE_DDR3 &&
1597 (!in_sdram && !warm_reset()) && (!is_dra7xx())) {
1599 do_bug0039_workaround(EMIF1_BASE);
1601 do_bug0039_workaround(EMIF2_BASE);
1604 debug("<<sdram_init()\n");