common: Move hang() to the same header as panic()
oweals/u-boot.git: arch/arm/mach-omap2/emif-common.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * EMIF programming
 *
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 *
 * Aneesh V <aneesh@ti.com>
 */

#include <common.h>
#include <hang.h>
#include <init.h>
#include <net.h>
#include <asm/emif.h>
#include <asm/arch/clock.h>
#include <asm/arch/sys_proto.h>
#include <asm/omap_common.h>
#include <asm/omap_sec_common.h>
#include <asm/utils.h>
#include <linux/compiler.h>
#include <asm/ti-common/ti-edma3.h>

static int emif1_enabled = -1, emif2_enabled = -1;

void set_lpmode_selfrefresh(u32 base)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 reg;

        reg = readl(&emif->emif_pwr_mgmt_ctrl);
        reg &= ~EMIF_REG_LP_MODE_MASK;
        reg |= LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT;
        reg &= ~EMIF_REG_SR_TIM_MASK;
        writel(reg, &emif->emif_pwr_mgmt_ctrl);

        /* dummy read for the new SR_TIM to be loaded */
        readl(&emif->emif_pwr_mgmt_ctrl);
}

void force_emif_self_refresh(void)
{
        set_lpmode_selfrefresh(EMIF1_BASE);
        if (!is_dra72x())
                set_lpmode_selfrefresh(EMIF2_BASE);
}

inline u32 emif_num(u32 base)
{
        if (base == EMIF1_BASE)
                return 1;
        else if (base == EMIF2_BASE)
                return 2;
        else
                return 0;
}

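/*
 * Note: a 32-bit read of an LPDDR2 mode register returns a copy of the
 * 8-bit MR value on each byte lane. When all four lanes agree, get_mr()
 * collapses the result to the 8-bit value; otherwise the raw word is
 * returned so callers can detect the per-lane mismatch (see the
 * "mr > 0xFF" checks in is_lpddr2_sdram_present()).
 */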
static inline u32 get_mr(u32 base, u32 cs, u32 mr_addr)
{
        u32 mr;
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        mr_addr |= cs << EMIF_REG_CS_SHIFT;
        writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
        if (omap_revision() == OMAP4430_ES2_0)
                mr = readl(&emif->emif_lpddr2_mode_reg_data_es2);
        else
                mr = readl(&emif->emif_lpddr2_mode_reg_data);
        debug("get_mr: EMIF%d cs %d mr %08x val 0x%x\n", emif_num(base),
              cs, mr_addr, mr);
        if (((mr & 0x0000ff00) >>  8) == (mr & 0xff) &&
            ((mr & 0x00ff0000) >> 16) == (mr & 0xff) &&
            ((mr & 0xff000000) >> 24) == (mr & 0xff))
                return mr & 0xff;
        else
                return mr;
}

static inline void set_mr(u32 base, u32 cs, u32 mr_addr, u32 mr_val)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        mr_addr |= cs << EMIF_REG_CS_SHIFT;
        writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
        writel(mr_val, &emif->emif_lpddr2_mode_reg_data);
}

void emif_reset_phy(u32 base)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 iodft;

        iodft = readl(&emif->emif_iodft_tlgc);
        iodft |= EMIF_REG_RESET_PHY_MASK;
        writel(iodft, &emif->emif_iodft_tlgc);
}

static void do_lpddr2_init(u32 base, u32 cs)
{
        u32 mr_addr;
        const struct lpddr2_mr_regs *mr_regs;

        get_lpddr2_mr_regs(&mr_regs);
        /* Wait till device auto initialization is complete */
        while (get_mr(base, cs, LPDDR2_MR0) & LPDDR2_MR0_DAI_MASK)
                ;
        set_mr(base, cs, LPDDR2_MR10, mr_regs->mr10);
        /*
         * tZQINIT = 1 us
         * Enough loops assuming a maximum of 2 GHz
         */
        sdelay(2000);

        set_mr(base, cs, LPDDR2_MR1, mr_regs->mr1);
        set_mr(base, cs, LPDDR2_MR16, mr_regs->mr16);

        /*
         * Enable refresh along with writing MR2
         * Encoding of RL in MR2 is (RL - 2)
         */
        mr_addr = LPDDR2_MR2 | EMIF_REG_REFRESH_EN_MASK;
        set_mr(base, cs, mr_addr, mr_regs->mr2);

        if (mr_regs->mr3 > 0)
                set_mr(base, cs, LPDDR2_MR3, mr_regs->mr3);
}

static void lpddr2_init(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        /* Not NVM */
        clrbits_le32(&emif->emif_lpddr2_nvm_config, EMIF_REG_CS1NVMEN_MASK);

        /*
         * Keep REG_INITREF_DIS = 1 to prevent re-initialization of SDRAM
         * when the EMIF_SDRAM_CONFIG register is written
         */
        setbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);

        /*
         * Set the SDRAM_CONFIG and PHY_CTRL for the
         * un-locked frequency & default RL
         */
        writel(regs->sdram_config_init, &emif->emif_sdram_config);
        writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);

        do_ext_phy_settings(base, regs);

        do_lpddr2_init(base, CS0);
        if (regs->sdram_config & EMIF_REG_EBANK_MASK)
                do_lpddr2_init(base, CS1);

        writel(regs->sdram_config, &emif->emif_sdram_config);
        writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);

        /* Enable refresh now */
        clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
}

__weak void do_ext_phy_settings(u32 base, const struct emif_regs *regs)
{
}

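/*
 * Timing values are written to the shadow (_shdw) registers here; the
 * EMIF transfers them to the functional registers on the next frequency
 * update (see the freq_update_core() call in sdram_init()).
 */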
void emif_update_timings(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        if (!is_dra7xx())
                writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl_shdw);
        else
                writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl_shdw);

        writel(regs->sdram_tim1, &emif->emif_sdram_tim_1_shdw);
        writel(regs->sdram_tim2, &emif->emif_sdram_tim_2_shdw);
        writel(regs->sdram_tim3, &emif->emif_sdram_tim_3_shdw);
        if (omap_revision() == OMAP4430_ES1_0) {
                /* ES1 bug: EMIF should be in force idle during freq_update */
                writel(0, &emif->emif_pwr_mgmt_ctrl);
        } else {
                writel(EMIF_PWR_MGMT_CTRL, &emif->emif_pwr_mgmt_ctrl);
                writel(EMIF_PWR_MGMT_CTRL_SHDW, &emif->emif_pwr_mgmt_ctrl_shdw);
        }
        writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl_shdw);
        writel(regs->zq_config, &emif->emif_zq_config);
        writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
        writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);

        if ((omap_revision() >= OMAP5430_ES1_0) || is_dra7xx()) {
                writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0,
                        &emif->emif_l3_config);
        } else if (omap_revision() >= OMAP4460_ES1_0) {
                writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_3_LL_0,
                        &emif->emif_l3_config);
        } else {
                writel(EMIF_L3_CONFIG_VAL_SYS_10_LL_0,
                        &emif->emif_l3_config);
        }
}

#ifndef CONFIG_OMAP44XX
static void omap5_ddr3_leveling(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        /* keep sdram in self-refresh */
        writel(((LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT)
                & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);
        __udelay(130);

        /*
         * Set invert_clkout (if activated) in DDR_PHYCTRL_1.
         * Invert clock adds an additional half cycle delay on the
         * command interface. The additional half cycle is usually
         * meant to enable leveling in the situation that DQS is later
         * than CK on the board. It also helps provide some additional
         * margin for leveling.
         */
        writel(regs->emif_ddr_phy_ctlr_1,
               &emif->emif_ddr_phy_ctrl_1);

        writel(regs->emif_ddr_phy_ctlr_1,
               &emif->emif_ddr_phy_ctrl_1_shdw);
        __udelay(130);

        writel(((LP_MODE_DISABLE << EMIF_REG_LP_MODE_SHIFT)
               & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);

        /* Launch full leveling */
        writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);

        /* Wait till full leveling is complete */
        readl(&emif->emif_rd_wr_lvl_ctl);
        __udelay(130);

        /* Read data eye leveling number of samples */
        config_data_eye_leveling_samples(base);

        /*
         * Launch 8 incremental WR_LVL adjustments to compensate for
         * a PHY limitation.
         */
        writel(0x2 << EMIF_REG_WRLVLINC_INT_SHIFT,
               &emif->emif_rd_wr_lvl_ctl);

        __udelay(130);

        /* Launch incremental leveling */
        writel(DDR3_INC_LVL, &emif->emif_rd_wr_lvl_ctl);
        __udelay(130);
}

static void update_hwleveling_output(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 *emif_ext_phy_ctrl_reg, *emif_phy_status;
        u32 reg, i, phy;

        emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[6];
        phy = readl(&emif->emif_ddr_phy_ctrl_1);

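        /*
         * Each EXT_PHY_CTRL register below is immediately followed by its
         * shadow register, which is why every leveled ratio read from the
         * PHY status registers is written twice, to consecutive locations.
         */
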
        /* Update PHY_REG_RDDQS_RATIO */
        emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_7;
        if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVL_MASK_MASK))
                for (i = 0; i < PHY_RDDQS_RATIO_REGS; i++) {
                        reg = readl(emif_phy_status++);
                        writel(reg, emif_ext_phy_ctrl_reg++);
                        writel(reg, emif_ext_phy_ctrl_reg++);
                }

        /* Update PHY_REG_FIFO_WE_SLAVE_RATIO */
        emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_2;
        emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[11];
        if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVLGATE_MASK_MASK))
                for (i = 0; i < PHY_FIFO_WE_SLAVE_RATIO_REGS; i++) {
                        reg = readl(emif_phy_status++);
                        writel(reg, emif_ext_phy_ctrl_reg++);
                        writel(reg, emif_ext_phy_ctrl_reg++);
                }

        /* Update PHY_REG_WR_DQ/DQS_SLAVE_RATIO */
        emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_12;
        emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[16];
        if (!(phy & EMIF_DDR_PHY_CTRL_1_WRLVL_MASK_MASK))
                for (i = 0; i < PHY_REG_WR_DQ_SLAVE_RATIO_REGS; i++) {
                        reg = readl(emif_phy_status++);
                        writel(reg, emif_ext_phy_ctrl_reg++);
                        writel(reg, emif_ext_phy_ctrl_reg++);
                }

        /* Disable leveling */
        writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
        writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
        writel(0x0, &emif->emif_rd_wr_lvl_rmp_ctl);
}

static void dra7_ddr3_leveling(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        /* Clear error status */
        clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36,
                        EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
                        EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);

        clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36_shdw,
                        EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
                        EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);

        /* Disable refreshes before leveling */
        clrsetbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK,
                        EMIF_REG_INITREF_DIS_MASK);

        /* Start full leveling */
        writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);

        __udelay(300);

        /* Check for leveling timeout */
        if (readl(&emif->emif_status) & EMIF_REG_LEVELING_TO_MASK) {
                printf("Leveling timeout on EMIF%d\n", emif_num(base));
                return;
        }

        /* Enable refreshes after leveling */
        clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);

        debug("HW leveling success\n");
        /*
         * Update slave ratios in EXT_PHY_CTRLx registers
         * as per HW leveling output
         */
        update_hwleveling_output(base, regs);
}

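/*
 * Zero a DDR region so that valid ECC check bits exist before ECC
 * verification is re-enabled. The EDMA3 fill (when configured) offloads
 * the clear, which is much faster than a CPU memset() over a region
 * that can span hundreds of megabytes.
 */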
static void dra7_reset_ddr_data(u32 base, u32 size)
{
#if defined(CONFIG_TI_EDMA3) && !defined(CONFIG_DMA)
        enable_edma3_clocks();

        edma3_fill(EDMA3_BASE, 1, (void *)base, 0, size);

        disable_edma3_clocks();
#else
        memset((void *)base, 0, size);
#endif
}

static void dra7_enable_ecc(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 rgn, rgn_start, size, ctrl_reg;

        /* ECC available only on dra76x EMIF1 */
        if ((base != EMIF1_BASE) || !is_dra76x())
                return;

        if (regs->emif_ecc_ctrl_reg & EMIF_ECC_CTRL_REG_ECC_EN_MASK) {
                /* Disable high-order interleaving */
                clrbits_le32(MA_PRIORITY, MA_HIMEM_INTERLEAVE_UN_MASK);

#ifdef CONFIG_DRA7XX
                /* Clear the status flags and other history */
                writel(readl(&emif->emif_1b_ecc_err_cnt),
                       &emif->emif_1b_ecc_err_cnt);
                writel(0xffffffff, &emif->emif_1b_ecc_err_dist_1);
                writel(0x2, &emif->emif_1b_ecc_err_addr_log);
                writel(0x1, &emif->emif_2b_ecc_err_addr_log);
                writel(EMIF_INT_WR_ECC_ERR_SYS_MASK |
                       EMIF_INT_TWOBIT_ECC_ERR_SYS_MASK |
                       EMIF_INT_ONEBIT_ECC_ERR_SYS_MASK,
                       &emif->emif_irqstatus_sys);
#endif
                writel(regs->emif_ecc_address_range_1,
                       &emif->emif_ecc_address_range_1);
                writel(regs->emif_ecc_address_range_2,
                       &emif->emif_ecc_address_range_2);

                /* Disable RMW and ECC verification for read accesses */
                ctrl_reg = (regs->emif_ecc_ctrl_reg &
                            ~EMIF_ECC_REG_RMW_EN_MASK) |
                           EMIF_ECC_CTRL_REG_ECC_VERIFY_DIS_MASK;
                writel(ctrl_reg, &emif->emif_ecc_ctrl_reg);

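                /*
                 * Note: the range registers appear to encode start/end in
                 * 64 KiB granules (start in the low half-word, end in the
                 * high half-word), hence the << 16 and the +0x10000 for
                 * the inclusive end address in the size calculations below.
                 */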
                /* Set region1 memory with 0 */
                rgn_start = (regs->emif_ecc_address_range_1 &
                             EMIF_ECC_REG_ECC_START_ADDR_MASK) << 16;
                rgn = rgn_start + CONFIG_SYS_SDRAM_BASE;
                size = (regs->emif_ecc_address_range_1 &
                        EMIF_ECC_REG_ECC_END_ADDR_MASK) + 0x10000 - rgn_start;

                if (regs->emif_ecc_ctrl_reg &
                    EMIF_ECC_REG_ECC_ADDR_RGN_1_EN_MASK)
                        dra7_reset_ddr_data(rgn, size);

                /* Set region2 memory with 0 */
                rgn_start = (regs->emif_ecc_address_range_2 &
                             EMIF_ECC_REG_ECC_START_ADDR_MASK) << 16;
                rgn = rgn_start + CONFIG_SYS_SDRAM_BASE;
                size = (regs->emif_ecc_address_range_2 &
                        EMIF_ECC_REG_ECC_END_ADDR_MASK) + 0x10000 - rgn_start;

                if (regs->emif_ecc_ctrl_reg &
                    EMIF_ECC_REG_ECC_ADDR_RGN_2_EN_MASK)
                        dra7_reset_ddr_data(rgn, size);

                /* Default value enables RMW and ECC verification */
                writel(regs->emif_ecc_ctrl_reg, &emif->emif_ecc_ctrl_reg);
        }
}

static void dra7_ddr3_init(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        if (warm_reset()) {
                emif_reset_phy(base);
                writel(0x0, &emif->emif_pwr_mgmt_ctrl);
        }
        do_ext_phy_settings(base, regs);

        writel(regs->ref_ctrl | EMIF_REG_INITREF_DIS_MASK,
               &emif->emif_sdram_ref_ctrl);
        /* Update timing registers */
        writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
        writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
        writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);

        writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0, &emif->emif_l3_config);
        writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);
        writel(regs->zq_config, &emif->emif_zq_config);
        writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
        writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
        writel(regs->emif_rd_wr_lvl_ctl, &emif->emif_rd_wr_lvl_ctl);

        writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
        writel(regs->emif_rd_wr_exec_thresh, &emif->emif_rd_wr_exec_thresh);

        writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);

        writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
        writel(regs->sdram_config_init, &emif->emif_sdram_config);

        __udelay(1000);

        writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl);

        if (regs->emif_rd_wr_lvl_rmp_ctl & EMIF_REG_RDWRLVL_EN_MASK) {
                /*
                 * Perform a dummy ECC setup just to allow hardware
                 * leveling of ECC memories
                 */
                if (is_dra76x() && (base == EMIF1_BASE) &&
                    (regs->emif_ecc_ctrl_reg & EMIF_ECC_CTRL_REG_ECC_EN_MASK)) {
                        writel(0, &emif->emif_ecc_address_range_1);
                        writel(0, &emif->emif_ecc_address_range_2);
                        writel(EMIF_ECC_CTRL_REG_ECC_EN_MASK |
                               EMIF_ECC_CTRL_REG_ECC_ADDR_RGN_PROT_MASK,
                               &emif->emif_ecc_ctrl_reg);
                }

                dra7_ddr3_leveling(base, regs);

                /* Disable ECC */
                if (is_dra76x())
                        writel(0, &emif->emif_ecc_ctrl_reg);
        }

        /* Enable ECC as necessary */
        dra7_enable_ecc(base, regs);
}

static void omap5_ddr3_init(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);
        writel(regs->sdram_config_init, &emif->emif_sdram_config);
        /*
         * Set SDRAM_CONFIG and the PHY control registers to the locked
         * frequency and RL = 7. As the default values of the mode
         * registers are not defined, the contents of the mode registers
         * must be fully initialized. H/W takes care of this
         * initialization.
         */
        writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);

        /* Update timing registers */
        writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
        writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
        writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);

        writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);

        writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
        writel(regs->sdram_config_init, &emif->emif_sdram_config);
        do_ext_phy_settings(base, regs);

        writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
        omap5_ddr3_leveling(base, regs);
}

static void ddr3_init(u32 base, const struct emif_regs *regs)
{
        if (is_omap54xx())
                omap5_ddr3_init(base, regs);
        else
                dra7_ddr3_init(base, regs);
}
#endif

#ifndef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
#define print_timing_reg(reg) debug(#reg" - 0x%08x\n", (reg))

/*
 * Organization and refresh requirements for LPDDR2 devices of different
 * types and densities. Derived from JESD209-2 section 2.4
 */
const struct lpddr2_addressing addressing_table[] = {
        /* Banks tREFIx10     rowx32,rowx16      colx32,colx16  density */
        {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_7, COL_8} },/*64M */
        {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_8, COL_9} },/*128M */
        {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_8, COL_9} },/*256M */
        {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*512M */
        {BANKS8, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*1GS4 */
        {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_9, COL_10} },/*2GS4 */
        {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_10, COL_11} },/*4G */
        {BANKS8, T_REFI_3_9, {ROW_15, ROW_15}, {COL_10, COL_11} },/*8G */
        {BANKS4, T_REFI_7_8, {ROW_14, ROW_14}, {COL_9, COL_10} },/*1GS2 */
        {BANKS4, T_REFI_3_9, {ROW_15, ROW_15}, {COL_9, COL_10} },/*2GS2 */
};

static const u32 lpddr2_density_2_size_in_mbytes[] = {
        8,                      /* 64Mb */
        16,                     /* 128Mb */
        32,                     /* 256Mb */
        64,                     /* 512Mb */
        128,                    /* 1Gb   */
        256,                    /* 2Gb   */
        512,                    /* 4Gb   */
        1024,                   /* 8Gb   */
        2048,                   /* 16Gb  */
        4096                    /* 32Gb  */
};

/*
 * Calculate the period of the DDR clock from the frequency value and set
 * the denominator and numerator in global variables for easy access later
 */
static void set_ddr_clk_period(u32 freq)
{
        /*
         * period = 1/freq
         * period_in_ns = 10^9/freq
         */
        *T_num = 1000000000;
        *T_den = freq;
        cancel_out(T_num, T_den, 200);
}
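
/*
 * Illustrative example: for freq = 400 MHz the fraction starts as
 * 10^9/(400 * 10^6) and cancel_out() cancels common factors, giving
 * 5/2, i.e. a 2.5 ns period. ns_2_cycles() below then computes
 * ceil(ns * T_den / T_num), so ns_2_cycles(15) = (15*2 + 5 - 1)/5 = 6
 * cycles.
 */
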
/*
 * Convert time in nanoseconds to the number of cycles of the DDR clock
 */
static inline u32 ns_2_cycles(u32 ns)
{
        return ((ns * (*T_den)) + (*T_num) - 1) / (*T_num);
}

/*
 * Like ns_2_cycles(), with the difference that the time passed in is 2
 * times the actual value (to avoid fractions). The cycle count returned
 * is for the original value of the timing parameter.
 */
static inline u32 ns_x2_2_cycles(u32 ns)
{
        return ((ns * (*T_den)) + (*T_num) * 2 - 1) / ((*T_num) * 2);
}

/*
 * Find the addressing table index based on the device's type (S2 or S4)
 * and density
 */
s8 addressing_table_index(u8 type, u8 density, u8 width)
{
        u8 index;

        if ((density > LPDDR2_DENSITY_8Gb) || (width == LPDDR2_IO_WIDTH_8))
                return -1;

        /*
         * Look at the way the ADDR_TABLE_INDEX* values have been defined
         * in emif.h compared to the LPDDR2_DENSITY_* values.
         * The table is laid out in increasing order of density
         * (ignoring type). The exceptions, 1GS2 and 2GS2, have been
         * placed at the end.
         */
        if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_1Gb))
                index = ADDR_TABLE_INDEX1GS2;
        else if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_2Gb))
                index = ADDR_TABLE_INDEX2GS2;
        else
                index = density;

        debug("emif: addressing table index %d\n", index);

        return index;
}

/*
 * Find the right timing table from the array of timing
 * tables of the device using the DDR clock frequency
 */
static const struct lpddr2_ac_timings *get_timings_table(const struct
                        lpddr2_ac_timings *const *device_timings,
                        u32 freq)
{
        u32 i, temp, freq_nearest;
        const struct lpddr2_ac_timings *timings = 0;

        emif_assert(freq <= MAX_LPDDR2_FREQ);
        emif_assert(device_timings);

        /*
         * Start with the maximum allowed frequency - that is always safe
         */
        freq_nearest = MAX_LPDDR2_FREQ;
        /*
         * Find the timings table that has the max frequency value:
         *   i.  Above or equal to the DDR frequency - safe
         *   ii. The lowest that satisfies condition (i) - optimal
         */
        for (i = 0; (i < MAX_NUM_SPEEDBINS) && device_timings[i]; i++) {
                temp = device_timings[i]->max_freq;
                if ((temp >= freq) && (temp <= freq_nearest)) {
                        freq_nearest = temp;
                        timings = device_timings[i];
                }
        }
        debug("emif: timings table: %d\n", freq_nearest);
        return timings;
}

/*
 * Finds the value of emif_sdram_config_reg.
 * All parameters are programmed based on the device on CS0.
 * If there is a device on CS1, it will be the same as that on CS0 or
 * it will be NVM. We don't support NVM yet.
 * If the cs1_device pointer is NULL, it is assumed that there is no
 * device on CS1.
 */
static u32 get_sdram_config_reg(const struct lpddr2_device_details *cs0_device,
                                const struct lpddr2_device_details *cs1_device,
                                const struct lpddr2_addressing *addressing,
                                u8 RL)
{
        u32 config_reg = 0;

        config_reg |=  (cs0_device->type + 4) << EMIF_REG_SDRAM_TYPE_SHIFT;
        config_reg |=  EMIF_INTERLEAVING_POLICY_MAX_INTERLEAVING <<
                        EMIF_REG_IBANK_POS_SHIFT;

        config_reg |= cs0_device->io_width << EMIF_REG_NARROW_MODE_SHIFT;

        config_reg |= RL << EMIF_REG_CL_SHIFT;

        config_reg |= addressing->row_sz[cs0_device->io_width] <<
                        EMIF_REG_ROWSIZE_SHIFT;

        config_reg |= addressing->num_banks << EMIF_REG_IBANK_SHIFT;

        config_reg |= (cs1_device ? EBANK_CS1_EN : EBANK_CS1_DIS) <<
                        EMIF_REG_EBANK_SHIFT;

        config_reg |= addressing->col_sz[cs0_device->io_width] <<
                        EMIF_REG_PAGESIZE_SHIFT;

        return config_reg;
}

static u32 get_sdram_ref_ctrl(u32 freq,
                              const struct lpddr2_addressing *addressing)
{
        u32 ref_ctrl = 0, val = 0, freq_khz;

        freq_khz = freq / 1000;
        /*
         * The refresh rate to program is tREFI * freq in MHz. The
         * division by 10000 accounts for the kHz units and the x10
         * factor in t_REFI_us_x10.
         */
        val = addressing->t_REFI_us_x10 * freq_khz / 10000;
        ref_ctrl |= val << EMIF_REG_REFRESH_RATE_SHIFT;

        return ref_ctrl;
}
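
/*
 * Illustrative example: with tREFI = 7.8 us (t_REFI_us_x10 = 78) and a
 * 400 MHz DDR clock (freq_khz = 400000), the refresh interval above
 * works out to 78 * 400000 / 10000 = 3120 DDR clock cycles.
 */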

static u32 get_sdram_tim_1_reg(const struct lpddr2_ac_timings *timings,
                               const struct lpddr2_min_tck *min_tck,
                               const struct lpddr2_addressing *addressing)
{
        u32 tim1 = 0, val = 0;

        val = max(min_tck->tWTR, ns_x2_2_cycles(timings->tWTRx2)) - 1;
        tim1 |= val << EMIF_REG_T_WTR_SHIFT;

        if (addressing->num_banks == BANKS8)
                val = (timings->tFAW * (*T_den) + 4 * (*T_num) - 1) /
                                                        (4 * (*T_num)) - 1;
        else
                val = max(min_tck->tRRD, ns_2_cycles(timings->tRRD)) - 1;

        tim1 |= val << EMIF_REG_T_RRD_SHIFT;

        val = ns_2_cycles(timings->tRASmin + timings->tRPab) - 1;
        tim1 |= val << EMIF_REG_T_RC_SHIFT;

        val = max(min_tck->tRAS_MIN, ns_2_cycles(timings->tRASmin)) - 1;
        tim1 |= val << EMIF_REG_T_RAS_SHIFT;

        val = max(min_tck->tWR, ns_2_cycles(timings->tWR)) - 1;
        tim1 |= val << EMIF_REG_T_WR_SHIFT;

        val = max(min_tck->tRCD, ns_2_cycles(timings->tRCD)) - 1;
        tim1 |= val << EMIF_REG_T_RCD_SHIFT;

        val = max(min_tck->tRP_AB, ns_2_cycles(timings->tRPab)) - 1;
        tim1 |= val << EMIF_REG_T_RP_SHIFT;

        return tim1;
}

static u32 get_sdram_tim_2_reg(const struct lpddr2_ac_timings *timings,
                               const struct lpddr2_min_tck *min_tck)
{
        u32 tim2 = 0, val = 0;

        val = max(min_tck->tCKE, timings->tCKE) - 1;
        tim2 |= val << EMIF_REG_T_CKE_SHIFT;

        val = max(min_tck->tRTP, ns_x2_2_cycles(timings->tRTPx2)) - 1;
        tim2 |= val << EMIF_REG_T_RTP_SHIFT;

        /*
         * tXSRD = tRFCab + 10 ns. XSRD and XSNR should have the
         * same value
         */
        val = ns_2_cycles(timings->tXSR) - 1;
        tim2 |= val << EMIF_REG_T_XSRD_SHIFT;
        tim2 |= val << EMIF_REG_T_XSNR_SHIFT;

        val = max(min_tck->tXP, ns_x2_2_cycles(timings->tXPx2)) - 1;
        tim2 |= val << EMIF_REG_T_XP_SHIFT;

        return tim2;
}

static u32 get_sdram_tim_3_reg(const struct lpddr2_ac_timings *timings,
                               const struct lpddr2_min_tck *min_tck,
                               const struct lpddr2_addressing *addressing)
{
        u32 tim3 = 0, val = 0;

        val = min(timings->tRASmax * 10 / addressing->t_REFI_us_x10 - 1, 0xF);
        tim3 |= val << EMIF_REG_T_RAS_MAX_SHIFT;

        val = ns_2_cycles(timings->tRFCab) - 1;
        tim3 |= val << EMIF_REG_T_RFC_SHIFT;

        val = ns_x2_2_cycles(timings->tDQSCKMAXx2) - 1;
        tim3 |= val << EMIF_REG_T_TDQSCKMAX_SHIFT;

        val = ns_2_cycles(timings->tZQCS) - 1;
        tim3 |= val << EMIF_REG_ZQ_ZQCS_SHIFT;

        val = max(min_tck->tCKESR, ns_2_cycles(timings->tCKESR)) - 1;
        tim3 |= val << EMIF_REG_T_CKESR_SHIFT;

        return tim3;
}

static u32 get_zq_config_reg(const struct lpddr2_device_details *cs1_device,
                             const struct lpddr2_addressing *addressing,
                             u8 volt_ramp)
{
        u32 zq = 0, val = 0;

        if (volt_ramp)
                val =
                    EMIF_ZQCS_INTERVAL_DVFS_IN_US * 10 /
                    addressing->t_REFI_us_x10;
        else
                val =
                    EMIF_ZQCS_INTERVAL_NORMAL_IN_US * 10 /
                    addressing->t_REFI_us_x10;
        zq |= val << EMIF_REG_ZQ_REFINTERVAL_SHIFT;

        zq |= (REG_ZQ_ZQCL_MULT - 1) << EMIF_REG_ZQ_ZQCL_MULT_SHIFT;

        zq |= (REG_ZQ_ZQINIT_MULT - 1) << EMIF_REG_ZQ_ZQINIT_MULT_SHIFT;

        zq |= REG_ZQ_SFEXITEN_ENABLE << EMIF_REG_ZQ_SFEXITEN_SHIFT;

        /*
         * We assume that the two chip-selects share a single calibration
         * resistor. If there are indeed two calibration resistors, this
         * flag should be enabled to take advantage of the dual
         * calibration feature.
         * This data should ideally come from board files, but considering
         * that none of the boards today have calibration resistors per
         * CS, it would be unnecessary overhead.
         */
        zq |= REG_ZQ_DUALCALEN_DISABLE << EMIF_REG_ZQ_DUALCALEN_SHIFT;

        zq |= REG_ZQ_CS0EN_ENABLE << EMIF_REG_ZQ_CS0EN_SHIFT;

        zq |= (cs1_device ? 1 : 0) << EMIF_REG_ZQ_CS1EN_SHIFT;

        return zq;
}

static u32 get_temp_alert_config(const struct lpddr2_device_details *cs1_device,
                                 const struct lpddr2_addressing *addressing,
                                 u8 is_derated)
{
        u32 alert = 0, interval;

        interval =
            TEMP_ALERT_POLL_INTERVAL_MS * 10000 / addressing->t_REFI_us_x10;
        if (is_derated)
                interval *= 4;
        alert |= interval << EMIF_REG_TA_REFINTERVAL_SHIFT;

        alert |= TEMP_ALERT_CONFIG_DEVCT_1 << EMIF_REG_TA_DEVCNT_SHIFT;

        alert |= TEMP_ALERT_CONFIG_DEVWDT_32 << EMIF_REG_TA_DEVWDT_SHIFT;

        alert |= 1 << EMIF_REG_TA_SFEXITEN_SHIFT;

        alert |= 1 << EMIF_REG_TA_CS0EN_SHIFT;

        alert |= (cs1_device ? 1 : 0) << EMIF_REG_TA_CS1EN_SHIFT;

        return alert;
}

static u32 get_read_idle_ctrl_reg(u8 volt_ramp)
{
        u32 idle = 0, val = 0;

        if (volt_ramp)
                val = ns_2_cycles(READ_IDLE_INTERVAL_DVFS) / 64 - 1;
        else
                /* Maximum value in normal conditions - suggested by hw team */
                val = 0x1FF;
        idle |= val << EMIF_REG_READ_IDLE_INTERVAL_SHIFT;

        idle |= EMIF_REG_READ_IDLE_LEN_VAL << EMIF_REG_READ_IDLE_LEN_SHIFT;

        return idle;
}

static u32 get_ddr_phy_ctrl_1(u32 freq, u8 RL)
{
        u32 phy = 0, val = 0;

        phy |= (RL + 2) << EMIF_REG_READ_LATENCY_SHIFT;

        if (freq <= 100000000)
                val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS;
        else if (freq <= 200000000)
                val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ;
        else
                val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ;
        phy |= val << EMIF_REG_DLL_SLAVE_DLY_CTRL_SHIFT;

        /* Other fields are constant magic values. Hardcode them together */
        phy |= EMIF_DDR_PHY_CTRL_1_BASE_VAL <<
                EMIF_EMIF_DDR_PHY_CTRL_1_BASE_VAL_SHIFT;

        return phy;
}

static u32 get_emif_mem_size(u32 base)
{
        u32 size_mbytes = 0, temp;
        struct emif_device_details dev_details;
        struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
        u32 emif_nr = emif_num(base);

        emif_reset_phy(base);
        dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
                                                &cs0_dev_details);
        dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
                                                &cs1_dev_details);
        emif_reset_phy(base);

        if (dev_details.cs0_device_details) {
                temp = dev_details.cs0_device_details->density;
                size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
        }

        if (dev_details.cs1_device_details) {
                temp = dev_details.cs1_device_details->density;
                size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
        }
        /* convert to bytes */
        return size_mbytes << 20;
}

/* Gets the encoding corresponding to a given DMM section size */
u32 get_dmm_section_size_map(u32 section_size)
{
        /*
         * Section size mapping:
         * 0x0: 16-MiB section
         * 0x1: 32-MiB section
         * 0x2: 64-MiB section
         * 0x3: 128-MiB section
         * 0x4: 256-MiB section
         * 0x5: 512-MiB section
         * 0x6: 1-GiB section
         * 0x7: 2-GiB section
         */
        section_size >>= 24; /* divide by 16 MiB */
        return log_2_n_round_down(section_size);
}
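
/*
 * Sanity check (illustrative): a 512 MiB section is 0x20000000 bytes;
 * 0x20000000 >> 24 = 32 and log2(32) = 5, which matches the 0x5
 * encoding in the mapping above.
 */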

static void emif_calculate_regs(
                const struct emif_device_details *emif_dev_details,
                u32 freq, struct emif_regs *regs)
{
        u32 temp, sys_freq;
        const struct lpddr2_addressing *addressing;
        const struct lpddr2_ac_timings *timings;
        const struct lpddr2_min_tck *min_tck;
        const struct lpddr2_device_details *cs0_dev_details =
                                        emif_dev_details->cs0_device_details;
        const struct lpddr2_device_details *cs1_dev_details =
                                        emif_dev_details->cs1_device_details;
        const struct lpddr2_device_timings *cs0_dev_timings =
                                        emif_dev_details->cs0_device_timings;

        emif_assert(emif_dev_details);
        emif_assert(regs);
        /*
         * You cannot have a device on CS1 without one on CS0, so
         * configuring EMIF without a device on CS0 doesn't make sense.
         */
        emif_assert(cs0_dev_details);
        emif_assert(cs0_dev_details->type != LPDDR2_TYPE_NVM);
        /*
         * If there is a device on CS1 it should be the same type as CS0
         * (or NVM, but NVM is not supported in this driver yet)
         */
        emif_assert((cs1_dev_details == NULL) ||
                    (cs1_dev_details->type == LPDDR2_TYPE_NVM) ||
                    (cs0_dev_details->type == cs1_dev_details->type));
        emif_assert(freq <= MAX_LPDDR2_FREQ);

        set_ddr_clk_period(freq);

        /*
         * The device on CS0 is used for all timing calculations.
         * There is only one set of timing registers per EMIF, so if the
         * second CS (CS1) has a device, it should have the same timings
         * as the device on CS0.
         */
        timings = get_timings_table(cs0_dev_timings->ac_timings, freq);
        emif_assert(timings);
        min_tck = cs0_dev_timings->min_tck;

        temp = addressing_table_index(cs0_dev_details->type,
                                      cs0_dev_details->density,
                                      cs0_dev_details->io_width);

        emif_assert((temp >= 0));
        addressing = &(addressing_table[temp]);
        emif_assert(addressing);

        sys_freq = get_sys_clk_freq();

        regs->sdram_config_init = get_sdram_config_reg(cs0_dev_details,
                                                        cs1_dev_details,
                                                        addressing, RL_BOOT);

        regs->sdram_config = get_sdram_config_reg(cs0_dev_details,
                                                cs1_dev_details,
                                                addressing, RL_FINAL);

        regs->ref_ctrl = get_sdram_ref_ctrl(freq, addressing);

        regs->sdram_tim1 = get_sdram_tim_1_reg(timings, min_tck, addressing);

        regs->sdram_tim2 = get_sdram_tim_2_reg(timings, min_tck);

        regs->sdram_tim3 = get_sdram_tim_3_reg(timings, min_tck, addressing);

        regs->read_idle_ctrl = get_read_idle_ctrl_reg(LPDDR2_VOLTAGE_STABLE);

        regs->temp_alert_config =
            get_temp_alert_config(cs1_dev_details, addressing, 0);

        regs->zq_config = get_zq_config_reg(cs1_dev_details, addressing,
                                            LPDDR2_VOLTAGE_STABLE);

        regs->emif_ddr_phy_ctlr_1_init =
                        get_ddr_phy_ctrl_1(sys_freq / 2, RL_BOOT);

        regs->emif_ddr_phy_ctlr_1 =
                        get_ddr_phy_ctrl_1(freq, RL_FINAL);

        regs->freq = freq;

        print_timing_reg(regs->sdram_config_init);
        print_timing_reg(regs->sdram_config);
        print_timing_reg(regs->ref_ctrl);
        print_timing_reg(regs->sdram_tim1);
        print_timing_reg(regs->sdram_tim2);
        print_timing_reg(regs->sdram_tim3);
        print_timing_reg(regs->read_idle_ctrl);
        print_timing_reg(regs->temp_alert_config);
        print_timing_reg(regs->zq_config);
        print_timing_reg(regs->emif_ddr_phy_ctlr_1);
        print_timing_reg(regs->emif_ddr_phy_ctlr_1_init);
}
#endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */

#ifdef CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION
const char *get_lpddr2_type(u8 type_id)
{
        switch (type_id) {
        case LPDDR2_TYPE_S4:
                return "LPDDR2-S4";
        case LPDDR2_TYPE_S2:
                return "LPDDR2-S2";
        default:
                return NULL;
        }
}

const char *get_lpddr2_io_width(u8 width_id)
{
        switch (width_id) {
        case LPDDR2_IO_WIDTH_8:
                return "x8";
        case LPDDR2_IO_WIDTH_16:
                return "x16";
        case LPDDR2_IO_WIDTH_32:
                return "x32";
        default:
                return NULL;
        }
}

const char *get_lpddr2_manufacturer(u32 manufacturer)
{
        switch (manufacturer) {
        case LPDDR2_MANUFACTURER_SAMSUNG:
                return "Samsung";
        case LPDDR2_MANUFACTURER_QIMONDA:
                return "Qimonda";
        case LPDDR2_MANUFACTURER_ELPIDA:
                return "Elpida";
        case LPDDR2_MANUFACTURER_ETRON:
                return "Etron";
        case LPDDR2_MANUFACTURER_NANYA:
                return "Nanya";
        case LPDDR2_MANUFACTURER_HYNIX:
                return "Hynix";
        case LPDDR2_MANUFACTURER_MOSEL:
                return "Mosel";
        case LPDDR2_MANUFACTURER_WINBOND:
                return "Winbond";
        case LPDDR2_MANUFACTURER_ESMT:
                return "ESMT";
        case LPDDR2_MANUFACTURER_SPANSION:
                return "Spansion";
        case LPDDR2_MANUFACTURER_SST:
                return "SST";
        case LPDDR2_MANUFACTURER_ZMOS:
                return "ZMOS";
        case LPDDR2_MANUFACTURER_INTEL:
                return "Intel";
        case LPDDR2_MANUFACTURER_NUMONYX:
                return "Numonyx";
        case LPDDR2_MANUFACTURER_MICRON:
                return "Micron";
        default:
                return NULL;
        }
}

static void display_sdram_details(u32 emif_nr, u32 cs,
                                  struct lpddr2_device_details *device)
{
        const char *mfg_str;
        const char *type_str;
        char density_str[10];
        u32 density;

        debug("EMIF%d CS%d\t", emif_nr, cs);

        if (!device) {
                debug("None\n");
                return;
        }

        mfg_str = get_lpddr2_manufacturer(device->manufacturer);
        type_str = get_lpddr2_type(device->type);

        density = lpddr2_density_2_size_in_mbytes[device->density];
        if ((density / 1024 * 1024) == density) {
                density /= 1024;
                sprintf(density_str, "%d GB", density);
        } else
                sprintf(density_str, "%d MB", density);
        if (mfg_str && type_str)
                debug("%s\t\t%s\t%s\n", mfg_str, type_str, density_str);
}

static u8 is_lpddr2_sdram_present(u32 base, u32 cs,
                                  struct lpddr2_device_details *lpddr2_device)
{
        u32 mr = 0, temp;

        mr = get_mr(base, cs, LPDDR2_MR0);
        if (mr > 0xFF) {
                /* Mode register value wider than 8 bits */
                return 0;
        }

        temp = (mr & LPDDR2_MR0_DI_MASK) >> LPDDR2_MR0_DI_SHIFT;
        if (temp) {
                /* Not SDRAM */
                return 0;
        }
        temp = (mr & LPDDR2_MR0_DNVI_MASK) >> LPDDR2_MR0_DNVI_SHIFT;

        if (temp) {
                /* DNV supported - but DNV is only supported for NVM */
                return 0;
        }

        mr = get_mr(base, cs, LPDDR2_MR4);
        if (mr > 0xFF) {
                /* Mode register value wider than 8 bits */
                return 0;
        }

        mr = get_mr(base, cs, LPDDR2_MR5);
        if (mr > 0xFF) {
                /* Mode register value wider than 8 bits */
                return 0;
        }

        if (!get_lpddr2_manufacturer(mr)) {
                /* Manufacturer not identified */
                return 0;
        }
        lpddr2_device->manufacturer = mr;

        mr = get_mr(base, cs, LPDDR2_MR6);
        if (mr >= 0xFF) {
                /* Mode register value wider than 8 bits */
                return 0;
        }

        mr = get_mr(base, cs, LPDDR2_MR7);
        if (mr >= 0xFF) {
                /* Mode register value wider than 8 bits */
                return 0;
        }

        mr = get_mr(base, cs, LPDDR2_MR8);
        if (mr >= 0xFF) {
                /* Mode register value wider than 8 bits */
                return 0;
        }

        temp = (mr & MR8_TYPE_MASK) >> MR8_TYPE_SHIFT;
        if (!get_lpddr2_type(temp)) {
                /* Not SDRAM */
                return 0;
        }
        lpddr2_device->type = temp;

        temp = (mr & MR8_DENSITY_MASK) >> MR8_DENSITY_SHIFT;
        if (temp > LPDDR2_DENSITY_32Gb) {
                /* Density not supported */
                return 0;
        }
        lpddr2_device->density = temp;

        temp = (mr & MR8_IO_WIDTH_MASK) >> MR8_IO_WIDTH_SHIFT;
        if (!get_lpddr2_io_width(temp)) {
                /* IO width unsupported value */
                return 0;
        }
        lpddr2_device->io_width = temp;

        /*
         * If all the above tests pass we should
         * have a device on this chip-select
         */
        return 1;
}

struct lpddr2_device_details *emif_get_device_details(u32 emif_nr, u8 cs,
                        struct lpddr2_device_details *lpddr2_dev_details)
{
        u32 phy;
        u32 base = (emif_nr == 1) ? EMIF1_BASE : EMIF2_BASE;

        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        if (!lpddr2_dev_details)
                return NULL;

        /* Do the minimum init for mode register accesses */
        if (!(running_from_sdram() || warm_reset())) {
                phy = get_ddr_phy_ctrl_1(get_sys_clk_freq() / 2, RL_BOOT);
                writel(phy, &emif->emif_ddr_phy_ctrl_1);
        }

        if (!(is_lpddr2_sdram_present(base, cs, lpddr2_dev_details)))
                return NULL;

        display_sdram_details(emif_num(base), cs, lpddr2_dev_details);

        return lpddr2_dev_details;
}
#endif /* CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION */

static void do_sdram_init(u32 base)
{
        const struct emif_regs *regs;
        u32 in_sdram, emif_nr;

        debug(">>do_sdram_init() %x\n", base);

        in_sdram = running_from_sdram();
        emif_nr = (base == EMIF1_BASE) ? 1 : 2;

#ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
        emif_get_reg_dump(emif_nr, &regs);
        if (!regs) {
                debug("EMIF: reg dump not provided\n");
                return;
        }
#else
        /*
         * The user has not provided the register values. We need to
         * calculate them based on the timings and the DDR frequency.
         */
        struct emif_device_details dev_details;
        struct emif_regs calculated_regs;

        /*
         * Get device details:
         * - Discovered if CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION is set
         * - Obtained from user otherwise
         */
        struct lpddr2_device_details cs0_dev_details, cs1_dev_details;

        emif_reset_phy(base);
        dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
                                                &cs0_dev_details);
        dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
                                                &cs1_dev_details);
        emif_reset_phy(base);

        /* Return if no devices on this EMIF */
        if (!dev_details.cs0_device_details &&
            !dev_details.cs1_device_details) {
                return;
        }

        /*
         * Get device timings:
         * - Default timings specified by JESD209-2 if
         *   CONFIG_SYS_DEFAULT_LPDDR2_TIMINGS is set
         * - Obtained from user otherwise
         */
        emif_get_device_timings(emif_nr, &dev_details.cs0_device_timings,
                                &dev_details.cs1_device_timings);

        /* Calculate the register values */
        emif_calculate_regs(&dev_details, omap_ddr_clk(), &calculated_regs);
        regs = &calculated_regs;
#endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */

        /*
         * Initializing the DDR device cannot happen from SDRAM.
         * Changing the timing registers in the EMIF can happen (going
         * from one OPP to another).
         */
        if (!in_sdram && (!warm_reset() || is_dra7xx())) {
                if (emif_sdram_type(regs->sdram_config) ==
                    EMIF_SDRAM_TYPE_LPDDR2)
                        lpddr2_init(base, regs);
#ifndef CONFIG_OMAP44XX
                else
                        ddr3_init(base, regs);
#endif
        }
#ifdef CONFIG_OMAP54XX
        if (warm_reset() && (emif_sdram_type(regs->sdram_config) ==
            EMIF_SDRAM_TYPE_DDR3) && !is_dra7xx()) {
                set_lpmode_selfrefresh(base);
                emif_reset_phy(base);
                omap5_ddr3_leveling(base, regs);
        }
#endif

        /* Write to the shadow registers */
        emif_update_timings(base, regs);

        debug("<<do_sdram_init() %x\n", base);
}

void emif_post_init_config(u32 base)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 omap_rev = omap_revision();

        /* reset phy on ES2.0 */
        if (omap_rev == OMAP4430_ES2_0)
                emif_reset_phy(base);

        /* Put EMIF back in smart idle on ES1.0 */
        if (omap_rev == OMAP4430_ES1_0)
                writel(0x80000000, &emif->emif_pwr_mgmt_ctrl);
}

void dmm_init(u32 base)
{
        const struct dmm_lisa_map_regs *lisa_map_regs;
        u32 i, section, valid;

#ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
        emif_get_dmm_regs(&lisa_map_regs);
#else
        u32 emif1_size, emif2_size, mapped_size, section_map = 0;
        u32 section_cnt, sys_addr;
        struct dmm_lisa_map_regs lis_map_regs_calculated = {0};

        mapped_size = 0;
        section_cnt = 3;
        sys_addr = CONFIG_SYS_SDRAM_BASE;
        emif1_size = get_emif_mem_size(EMIF1_BASE);
        emif2_size = get_emif_mem_size(EMIF2_BASE);
        debug("emif1_size 0x%x emif2_size 0x%x\n", emif1_size, emif2_size);

        if (!emif1_size && !emif2_size)
                return;

        /* symmetric interleaved section */
        if (emif1_size && emif2_size) {
                mapped_size = min(emif1_size, emif2_size);
                section_map = DMM_LISA_MAP_INTERLEAVED_BASE_VAL;
                section_map |= 0 << EMIF_SDRC_ADDR_SHIFT;
                /* only MSB */
                section_map |= (sys_addr >> 24) <<
                                EMIF_SYS_ADDR_SHIFT;
                section_map |= get_dmm_section_size_map(mapped_size * 2)
                                << EMIF_SYS_SIZE_SHIFT;
                lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
                emif1_size -= mapped_size;
                emif2_size -= mapped_size;
                sys_addr += (mapped_size * 2);
                section_cnt--;
        }
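
        /*
         * Illustrative example: with 512 MiB on each EMIF, the symmetric
         * section above interleaves 1 GiB starting at
         * CONFIG_SYS_SDRAM_BASE, emif1_size and emif2_size both drop to
         * 0, and no single-EMIF section is needed below.
         */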

        /*
         * Single EMIF section (we can have a maximum of 1 single EMIF
         * section - either EMIF1 or EMIF2 or none, but not both)
         */
        if (emif1_size) {
                section_map = DMM_LISA_MAP_EMIF1_ONLY_BASE_VAL;
                section_map |= get_dmm_section_size_map(emif1_size)
                                << EMIF_SYS_SIZE_SHIFT;
                /* only MSB */
                section_map |= (mapped_size >> 24) <<
                                EMIF_SDRC_ADDR_SHIFT;
                /* only MSB */
                section_map |= (sys_addr >> 24) << EMIF_SYS_ADDR_SHIFT;
                section_cnt--;
        }
        if (emif2_size) {
                section_map = DMM_LISA_MAP_EMIF2_ONLY_BASE_VAL;
                section_map |= get_dmm_section_size_map(emif2_size) <<
                                EMIF_SYS_SIZE_SHIFT;
                /* only MSB */
                section_map |= mapped_size >> 24 << EMIF_SDRC_ADDR_SHIFT;
                /* only MSB */
                section_map |= sys_addr >> 24 << EMIF_SYS_ADDR_SHIFT;
                section_cnt--;
        }

        if (section_cnt == 2) {
                /* Only 1 section - either symmetric or single EMIF */
                lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
                lis_map_regs_calculated.dmm_lisa_map_2 = 0;
                lis_map_regs_calculated.dmm_lisa_map_1 = 0;
        } else {
                /* 2 sections - 1 symmetric, 1 single EMIF */
                lis_map_regs_calculated.dmm_lisa_map_2 = section_map;
                lis_map_regs_calculated.dmm_lisa_map_1 = 0;
        }

        /* TRAP for invalid TILER mappings in section 0 */
        lis_map_regs_calculated.dmm_lisa_map_0 = DMM_LISA_MAP_0_INVAL_ADDR_TRAP;

        if (omap_revision() >= OMAP4460_ES1_0)
                lis_map_regs_calculated.is_ma_present = 1;

        lisa_map_regs = &lis_map_regs_calculated;
#endif
        struct dmm_lisa_map_regs *hw_lisa_map_regs =
            (struct dmm_lisa_map_regs *)base;

        writel(0, &hw_lisa_map_regs->dmm_lisa_map_3);
        writel(0, &hw_lisa_map_regs->dmm_lisa_map_2);
        writel(0, &hw_lisa_map_regs->dmm_lisa_map_1);
        writel(0, &hw_lisa_map_regs->dmm_lisa_map_0);

        writel(lisa_map_regs->dmm_lisa_map_3,
                &hw_lisa_map_regs->dmm_lisa_map_3);
        writel(lisa_map_regs->dmm_lisa_map_2,
                &hw_lisa_map_regs->dmm_lisa_map_2);
        writel(lisa_map_regs->dmm_lisa_map_1,
                &hw_lisa_map_regs->dmm_lisa_map_1);
        writel(lisa_map_regs->dmm_lisa_map_0,
                &hw_lisa_map_regs->dmm_lisa_map_0);

        if (lisa_map_regs->is_ma_present) {
                hw_lisa_map_regs =
                    (struct dmm_lisa_map_regs *)MA_BASE;

                writel(lisa_map_regs->dmm_lisa_map_3,
                        &hw_lisa_map_regs->dmm_lisa_map_3);
                writel(lisa_map_regs->dmm_lisa_map_2,
                        &hw_lisa_map_regs->dmm_lisa_map_2);
                writel(lisa_map_regs->dmm_lisa_map_1,
                        &hw_lisa_map_regs->dmm_lisa_map_1);
                writel(lisa_map_regs->dmm_lisa_map_0,
                        &hw_lisa_map_regs->dmm_lisa_map_0);

                setbits_le32(MA_PRIORITY, MA_HIMEM_INTERLEAVE_UN_MASK);
        }

        /*
         * An EMIF should be configured only when memory is mapped on
         * it. The emif1_enabled and emif2_enabled variables track this.
         */
        emif1_enabled = 0;
        emif2_enabled = 0;
        for (i = 0; i < 4; i++) {
                section = __raw_readl(DMM_BASE + i*4);
                valid = (section & EMIF_SDRC_MAP_MASK) >>
                        (EMIF_SDRC_MAP_SHIFT);
                if (valid == 3) {
                        emif1_enabled = 1;
                        emif2_enabled = 1;
                        break;
                }

                if (valid == 1)
                        emif1_enabled = 1;

                if (valid == 2)
                        emif2_enabled = 1;
        }
}

static void do_bug0039_workaround(u32 base)
{
        u32 val, i, clkctrl;
        struct emif_reg_struct *emif_base = (struct emif_reg_struct *)base;
        const struct read_write_regs *bug_00339_regs;
        u32 iterations;
        u32 *phy_status_base = &emif_base->emif_ddr_phy_status[0];
        u32 *phy_ctrl_base = &emif_base->emif_ddr_ext_phy_ctrl_1;

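        /*
         * The ext PHY ctrl registers are laid out as (register, shadow)
         * pairs, so for the n-th register below the value is written at
         * index 2*(n-1) and again at its shadow at index 2*n - 1.
         */
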
        if (is_dra7xx())
                phy_status_base++;

        bug_00339_regs = get_bug_regs(&iterations);

        /* Put the EMIF into idle */
        clkctrl = __raw_readl((*prcm)->cm_memif_clkstctrl);
        __raw_writel(0x0, (*prcm)->cm_memif_clkstctrl);

        /* Copy the phy status registers into the phy ctrl shadow registers */
        for (i = 0; i < iterations; i++) {
                val = __raw_readl(phy_status_base +
                                  bug_00339_regs[i].read_reg - 1);

                __raw_writel(val, phy_ctrl_base +
                             ((bug_00339_regs[i].write_reg - 1) << 1));

                __raw_writel(val, phy_ctrl_base +
                             (bug_00339_regs[i].write_reg << 1) - 1);
        }

        /* Disable leveling */
        writel(0x0, &emif_base->emif_rd_wr_lvl_rmp_ctl);

        __raw_writel(clkctrl, (*prcm)->cm_memif_clkstctrl);
}

/*
 * SDRAM initialization:
 * SDRAM initialization has two parts:
 * 1. Configuring the SDRAM device
 * 2. Updating the AC-timing-related parameters in the EMIF module
 * (1) should be done only once and should not be done while we are
 * running from SDRAM.
 * (2) can and should be done more than once if OPP changes.
 * In particular, this may be needed when we boot without SPL and
 * use the Configuration Header (CH). The ROM code supports only 50%
 * OPP at boot (low power boot), so U-Boot has to switch to OPP100 and
 * update the frequency. So:
 * Doing (1) and (2) makes sense - first-time initialization
 * Doing (2) and not (1) makes sense - OPP change (when using CH)
 * Doing (1) and not (2) doesn't make sense
 * See do_sdram_init() for the details.
 */
void sdram_init(void)
{
        u32 in_sdram, size_prog, size_detect;
        struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;
        u32 sdram_type = emif_sdram_type(emif->emif_sdram_config);

        debug(">>sdram_init()\n");

        if (omap_hw_init_context() == OMAP_INIT_CONTEXT_UBOOT_AFTER_SPL)
                return;

        in_sdram = running_from_sdram();
        debug("in_sdram = %d\n", in_sdram);

        if (!in_sdram) {
                if ((sdram_type == EMIF_SDRAM_TYPE_LPDDR2) && !warm_reset())
                        bypass_dpll((*prcm)->cm_clkmode_dpll_core);
                else if (sdram_type == EMIF_SDRAM_TYPE_DDR3)
                        writel(CM_DLL_CTRL_NO_OVERRIDE, (*prcm)->cm_dll_ctrl);
        }

        if (!in_sdram)
                dmm_init(DMM_BASE);

        if (emif1_enabled)
                do_sdram_init(EMIF1_BASE);

        if (emif2_enabled)
                do_sdram_init(EMIF2_BASE);

        if (!(in_sdram || warm_reset())) {
                if (emif1_enabled)
                        emif_post_init_config(EMIF1_BASE);
                if (emif2_enabled)
                        emif_post_init_config(EMIF2_BASE);
        }

        /* for the shadow registers to take effect */
        if (sdram_type == EMIF_SDRAM_TYPE_LPDDR2)
                freq_update_core();

        /* Do some testing after the init */
        if (!in_sdram) {
                size_prog = omap_sdram_size();
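                /*
                 * get_ram_size() probes at power-of-2 offsets, so round
                 * the programmed size down to the nearest power of two
                 * before using it as the probe limit.
                 */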
                size_prog = log_2_n_round_down(size_prog);
                size_prog = (1 << size_prog);

                size_detect = get_ram_size((long *)CONFIG_SYS_SDRAM_BASE,
                                                size_prog);
                /* Compare with the size programmed */
                if (size_detect != size_prog) {
                        printf("SDRAM: identified size not same as expected;"
                               " identified: %x expected: %x\n",
                               size_detect, size_prog);
                } else
                        debug("get_ram_size() successful\n");
        }

#if defined(CONFIG_TI_SECURE_DEVICE)
        /*
         * On HS devices, do the static EMIF firewall configuration,
         * but only if we are not already running from SDRAM
         */
        if (!in_sdram)
                if (0 != secure_emif_reserve())
                        hang();

        /* On HS devices, ensure the static EMIF firewall APIs are locked */
        if (0 != secure_emif_firewall_lock())
                hang();
#endif

        if (sdram_type == EMIF_SDRAM_TYPE_DDR3 &&
            (!in_sdram && !warm_reset()) && (!is_dra7xx())) {
                if (emif1_enabled)
                        do_bug0039_workaround(EMIF1_BASE);
                if (emif2_enabled)
                        do_bug0039_workaround(EMIF2_BASE);
        }

        debug("<<sdram_init()\n");
}