arch/arm/mach-omap2/clocks-common.c
// SPDX-License-Identifier: GPL-2.0+
/*
 *
 * Clock initialization for OMAP4
 *
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 *
 * Aneesh V <aneesh@ti.com>
 *
 * Based on previous work by:
 *      Santosh Shilimkar <santosh.shilimkar@ti.com>
 *      Rajendra Nayak <rnayak@ti.com>
 */
#include <common.h>
#include <hang.h>
#include <i2c.h>
#include <init.h>
#include <log.h>
#include <asm/omap_common.h>
#include <asm/gpio.h>
#include <asm/arch/clock.h>
#include <asm/arch/sys_proto.h>
#include <asm/utils.h>
#include <asm/omap_gpio.h>
#include <asm/emif.h>

#ifndef CONFIG_SPL_BUILD
/*
 * printing to console doesn't work unless
 * this code is executed from SPL
 */
#define printf(fmt, args...)
#define puts(s)
#endif

const u32 sys_clk_array[8] = {
        12000000,               /* 12 MHz */
        20000000,               /* 20 MHz */
        16800000,               /* 16.8 MHz */
        19200000,               /* 19.2 MHz */
        26000000,               /* 26 MHz */
        27000000,               /* 27 MHz */
        38400000,               /* 38.4 MHz */
};

static inline u32 __get_sys_clk_index(void)
{
        s8 ind;
        /*
         * For ES1 the ROM code calibration of sys clock is not reliable
         * due to a hw issue, so use a hard-coded value. If this value is
         * not correct for any board, override this function in the board
         * file. From ES2.0 onwards this information is available from
         * CM_SYS_CLKSEL.
         */
        if (omap_revision() == OMAP4430_ES1_0)
                ind = OMAP_SYS_CLK_IND_38_4_MHZ;
        else {
                /* SYS_CLKSEL - 1 to match the dpll param array indices */
                ind = (readl((*prcm)->cm_sys_clksel) &
                        CM_SYS_CLKSEL_SYS_CLKSEL_MASK) - 1;
        }
        return ind;
}

u32 get_sys_clk_index(void)
        __attribute__ ((weak, alias("__get_sys_clk_index")));

u32 get_sys_clk_freq(void)
{
        u8 index = get_sys_clk_index();
        return sys_clk_array[index];
}

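/*
 * Worked example (register value assumed for illustration only): if the
 * SYS_CLKSEL field of CM_SYS_CLKSEL reads 4, __get_sys_clk_index()
 * returns 4 - 1 = 3, and get_sys_clk_freq() then returns
 * sys_clk_array[3] = 19200000, i.e. a 19.2 MHz system clock.
 */
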
void setup_post_dividers(u32 const base, const struct dpll_params *params)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        /* Setup post-dividers */
        if (params->m2 >= 0)
                writel(params->m2, &dpll_regs->cm_div_m2_dpll);
        if (params->m3 >= 0)
                writel(params->m3, &dpll_regs->cm_div_m3_dpll);
        if (params->m4_h11 >= 0)
                writel(params->m4_h11, &dpll_regs->cm_div_m4_h11_dpll);
        if (params->m5_h12 >= 0)
                writel(params->m5_h12, &dpll_regs->cm_div_m5_h12_dpll);
        if (params->m6_h13 >= 0)
                writel(params->m6_h13, &dpll_regs->cm_div_m6_h13_dpll);
        if (params->m7_h14 >= 0)
                writel(params->m7_h14, &dpll_regs->cm_div_m7_h14_dpll);
        if (params->h21 >= 0)
                writel(params->h21, &dpll_regs->cm_div_h21_dpll);
        if (params->h22 >= 0)
                writel(params->h22, &dpll_regs->cm_div_h22_dpll);
        if (params->h23 >= 0)
                writel(params->h23, &dpll_regs->cm_div_h23_dpll);
        if (params->h24 >= 0)
                writel(params->h24, &dpll_regs->cm_div_h24_dpll);
}

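/*
 * Note: a negative divider field means "leave this post-divider
 * untouched", so a dpll_params entry only needs to populate the outputs
 * it actually uses. A minimal sketch with made-up values (the name and
 * numbers below are hypothetical, for illustration only):
 *
 *   static const struct dpll_params mpu_dpll_params_example = {
 *           .m = 200, .n = 0, .m2 = 1,
 *           .m3 = -1, .m4_h11 = -1, .m5_h12 = -1, .m6_h13 = -1,
 *           .m7_h14 = -1, .h21 = -1, .h22 = -1, .h23 = -1, .h24 = -1,
 *   };
 */
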
static inline void do_bypass_dpll(u32 const base)
{
        struct dpll_regs *dpll_regs = (struct dpll_regs *)base;

        clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
                        CM_CLKMODE_DPLL_DPLL_EN_MASK,
                        DPLL_EN_FAST_RELOCK_BYPASS <<
                        CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_bypass(u32 const base)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        if (!wait_on_value(ST_DPLL_CLK_MASK, 0, &dpll_regs->cm_idlest_dpll,
                                LDELAY)) {
                printf("Bypassing DPLL failed %x\n", base);
        }
}

static inline void do_lock_dpll(u32 const base)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
                        CM_CLKMODE_DPLL_DPLL_EN_MASK,
                        DPLL_EN_LOCK << CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_lock(u32 const base)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        if (!wait_on_value(ST_DPLL_CLK_MASK, ST_DPLL_CLK_MASK,
                &dpll_regs->cm_idlest_dpll, LDELAY)) {
                printf("DPLL locking failed for %x\n", base);
                hang();
        }
}

inline u32 check_for_lock(u32 const base)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
        u32 lock = readl(&dpll_regs->cm_idlest_dpll) & ST_DPLL_CLK_MASK;

        return lock;
}

const struct dpll_params *get_mpu_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->mpu[sysclk_ind];
}

const struct dpll_params *get_core_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->core[sysclk_ind];
}

const struct dpll_params *get_per_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->per[sysclk_ind];
}

const struct dpll_params *get_iva_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->iva[sysclk_ind];
}

const struct dpll_params *get_usb_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->usb[sysclk_ind];
}

const struct dpll_params *get_abe_dpll_params(struct dplls const *dpll_data)
{
#ifdef CONFIG_SYS_OMAP_ABE_SYSCK
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->abe[sysclk_ind];
#else
        return dpll_data->abe;
#endif
}

static const struct dpll_params *get_ddr_dpll_params
                        (struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();

        if (!dpll_data->ddr)
                return NULL;
        return &dpll_data->ddr[sysclk_ind];
}

#ifdef CONFIG_DRIVER_TI_CPSW
static const struct dpll_params *get_gmac_dpll_params
                        (struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();

        if (!dpll_data->gmac)
                return NULL;
        return &dpll_data->gmac[sysclk_ind];
}
#endif

static void do_setup_dpll(u32 const base, const struct dpll_params *params,
                                u8 lock, char *dpll)
{
        u32 temp, M, N;
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        if (!params)
                return;

        temp = readl(&dpll_regs->cm_clksel_dpll);

        if (check_for_lock(base)) {
                /*
                 * The DPLL has already been locked by ROM code using the
                 * configuration header (CH). Check whether M and N match
                 * the ideal nominal OPP values. If they match, skip the
                 * rest, otherwise relock.
                 */
                M = (temp & CM_CLKSEL_DPLL_M_MASK) >> CM_CLKSEL_DPLL_M_SHIFT;
                N = (temp & CM_CLKSEL_DPLL_N_MASK) >> CM_CLKSEL_DPLL_N_SHIFT;
                if ((M != (params->m)) || (N != (params->n))) {
                        debug("\n %s DPLL locked, but not for ideal M = %d, "
                                "N = %d values, current values are M = %d, "
                                "N = %d", dpll, params->m, params->n,
                                M, N);
                } else {
                        /* DPLL locked with ideal values for nominal OPPs. */
                        debug("\n %s DPLL already locked with ideal "
                                                "nominal OPP values", dpll);

                        bypass_dpll(base);
                        goto setup_post_dividers;
                }
        }

        bypass_dpll(base);

        /* Set M & N */
        temp &= ~CM_CLKSEL_DPLL_M_MASK;
        temp |= (params->m << CM_CLKSEL_DPLL_M_SHIFT) & CM_CLKSEL_DPLL_M_MASK;

        temp &= ~CM_CLKSEL_DPLL_N_MASK;
        temp |= (params->n << CM_CLKSEL_DPLL_N_SHIFT) & CM_CLKSEL_DPLL_N_MASK;

        writel(temp, &dpll_regs->cm_clksel_dpll);

setup_post_dividers:
        setup_post_dividers(base, params);

        /* Lock */
        if (lock)
                do_lock_dpll(base);

        /* Wait till the DPLL locks */
        if (lock)
                wait_for_lock(base);
}

u32 omap_ddr_clk(void)
{
        u32 ddr_clk, sys_clk_khz, omap_rev, divider;
        const struct dpll_params *core_dpll_params;

        omap_rev = omap_revision();
        sys_clk_khz = get_sys_clk_freq() / 1000;

        core_dpll_params = get_core_dpll_params(*dplls_data);

        debug("sys_clk %d\n", sys_clk_khz * 1000);

        /* Find Core DPLL locked frequency first */
        ddr_clk = sys_clk_khz * 2 * core_dpll_params->m /
                        (core_dpll_params->n + 1);

        if (omap_rev < OMAP5430_ES1_0) {
                /*
                 * DDR frequency is PHY_ROOT_CLK/2
                 * PHY_ROOT_CLK = Fdpll/2/M2
                 */
                divider = 4;
        } else {
                /*
                 * DDR frequency is PHY_ROOT_CLK
                 * PHY_ROOT_CLK = Fdpll/2/M2
                 */
                divider = 2;
        }

        ddr_clk = ddr_clk / divider / core_dpll_params->m2;
        ddr_clk *= 1000;        /* convert to Hz */
        debug("ddr_clk %d\n", ddr_clk);

        return ddr_clk;
}

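/*
 * Worked example for omap_ddr_clk(), with purely hypothetical Core DPLL
 * parameters: sys_clk = 38400 kHz, m = 125, n = 5, m2 = 1 gives
 * Fdpll = 38400 * 2 * 125 / (5 + 1) = 1600000 kHz. On a pre-OMAP5430
 * device (divider = 4) the DDR clock is 1600000 / 4 / 1 = 400000 kHz,
 * i.e. 400 MHz.
 */
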
/*
 * Lock MPU dpll
 *
 * Resulting MPU frequencies:
 * 4430 ES1.0   : 600 MHz
 * 4430 ES2.x   : 792 MHz (OPP Turbo)
 * 4460         : 920 MHz (OPP Turbo) - DCC disabled
 */
void configure_mpu_dpll(void)
{
        const struct dpll_params *params;
        struct dpll_regs *mpu_dpll_regs;
        u32 omap_rev;
        omap_rev = omap_revision();

        /*
         * DCC and clock divider settings for 4460.
         * DCC is required only above a certain MPU frequency:
         * above 1 GHz on the 4460, above 1.4 GHz on the 5430.
         */
        if ((omap_rev >= OMAP4460_ES1_0) && (omap_rev < OMAP5430_ES1_0)) {
                mpu_dpll_regs =
                        (struct dpll_regs *)((*prcm)->cm_clkmode_dpll_mpu);
                bypass_dpll((*prcm)->cm_clkmode_dpll_mpu);
                clrbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
                        MPU_CLKCTRL_CLKSEL_EMIF_DIV_MODE_MASK);
                setbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
                        MPU_CLKCTRL_CLKSEL_ABE_DIV_MODE_MASK);
                clrbits_le32(&mpu_dpll_regs->cm_clksel_dpll,
                        CM_CLKSEL_DCC_EN_MASK);
        }

        params = get_mpu_dpll_params(*dplls_data);

        do_setup_dpll((*prcm)->cm_clkmode_dpll_mpu, params, DPLL_LOCK, "mpu");
        debug("MPU DPLL locked\n");
}

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
        defined(CONFIG_USB_MUSB_OMAP2PLUS)
static void setup_usb_dpll(void)
{
        const struct dpll_params *params;
        u32 sys_clk_khz, sd_div, num, den;

        sys_clk_khz = get_sys_clk_freq() / 1000;
        /*
         * USB:
         * USB dpll is J-type. Need to set DPLL_SD_DIV for jitter correction
         * DPLL_SD_DIV = CEILING ([DPLL_MULT/(DPLL_DIV+1)] * CLKINP / 250)
         *      - where CLKINP is sys_clk in MHz
         * Use CLKINP in kHz and adjust the denominator accordingly so
         * that we have enough accuracy and at the same time no overflow
         */
        params = get_usb_dpll_params(*dplls_data);
        num = params->m * sys_clk_khz;
        den = (params->n + 1) * 250 * 1000;
        num += den - 1;
        sd_div = num / den;
        clrsetbits_le32((*prcm)->cm_clksel_dpll_usb,
                        CM_CLKSEL_DPLL_DPLL_SD_DIV_MASK,
                        sd_div << CM_CLKSEL_DPLL_DPLL_SD_DIV_SHIFT);

        /* Now setup the dpll with the regular function */
        do_setup_dpll((*prcm)->cm_clkmode_dpll_usb, params, DPLL_LOCK, "usb");
}
#endif

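/*
 * Worked example of the DPLL_SD_DIV calculation above, with assumed
 * (illustrative) parameters: sys_clk_khz = 19200, m = 400, n = 0.
 * num = 400 * 19200 = 7680000, den = 1 * 250 * 1000 = 250000, so
 * sd_div = CEILING(7680000 / 250000) = CEILING(30.72) = 31.
 */
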
static void setup_dplls(void)
{
        u32 temp;
        const struct dpll_params *params;
        struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;

        debug("setup_dplls\n");

        /* CORE dpll */
        params = get_core_dpll_params(*dplls_data);     /* default - safest */
        /*
         * Do not lock the core DPLL now. Just set it up.
         * Core DPLL will be locked after setting up EMIF
         * using the FREQ_UPDATE method (freq_update_core()).
         */
        if (emif_sdram_type(readl(&emif->emif_sdram_config)) ==
            EMIF_SDRAM_TYPE_LPDDR2)
                do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
                                                        DPLL_NO_LOCK, "core");
        else
                do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
                                                        DPLL_LOCK, "core");
        /* Set the ratios for CORE_CLK, L3_CLK, L4_CLK */
        temp = (CLKSEL_CORE_X2_DIV_1 << CLKSEL_CORE_SHIFT) |
            (CLKSEL_L3_CORE_DIV_2 << CLKSEL_L3_SHIFT) |
            (CLKSEL_L4_L3_DIV_2 << CLKSEL_L4_SHIFT);
        writel(temp, (*prcm)->cm_clksel_core);
        debug("Core DPLL configured\n");

        /* lock PER dpll */
        params = get_per_dpll_params(*dplls_data);
        do_setup_dpll((*prcm)->cm_clkmode_dpll_per,
                        params, DPLL_LOCK, "per");
        debug("PER DPLL locked\n");

        /* MPU dpll */
        configure_mpu_dpll();

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
        defined(CONFIG_USB_MUSB_OMAP2PLUS)
        setup_usb_dpll();
#endif
        params = get_ddr_dpll_params(*dplls_data);
        do_setup_dpll((*prcm)->cm_clkmode_dpll_ddrphy,
                      params, DPLL_LOCK, "ddr");

#ifdef CONFIG_DRIVER_TI_CPSW
        params = get_gmac_dpll_params(*dplls_data);
        do_setup_dpll((*prcm)->cm_clkmode_dpll_gmac, params,
                      DPLL_LOCK, "gmac");
#endif
}

u32 get_offset_code(u32 volt_offset, struct pmic_data *pmic)
{
        u32 offset_code;

        volt_offset -= pmic->base_offset;

        offset_code = (volt_offset + pmic->step - 1) / pmic->step;

        /*
         * Offset codes 1-6 all give the base voltage in Palmas.
         * Offset code 0 switches OFF the SMPS.
         */
        return offset_code + pmic->start_code;
}

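/*
 * Worked example for get_offset_code(), with assumed (illustrative) PMIC
 * parameters: base_offset = 500000 uV, step = 10000 uV, start_code = 6.
 * For a requested 1060000 uV: volt_offset = 560000, the rounded-up
 * division gives 56, and the returned code is 56 + 6 = 62.
 */
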
void do_scale_vcore(u32 vcore_reg, u32 volt_mv, struct pmic_data *pmic)
{
        u32 offset_code;
        u32 offset = volt_mv;
        int ret = 0;

        if (!volt_mv)
                return;

        pmic->pmic_bus_init();
        /* See if we can first get the GPIO if needed */
        if (pmic->gpio_en)
                ret = gpio_request(pmic->gpio, "PMIC_GPIO");

        if (ret < 0) {
                printf("%s: gpio %d request failed %d\n", __func__,
                                                        pmic->gpio, ret);
                return;
        }

        /* Pull the GPIO low to select SET0 register, while we program SET1 */
        if (pmic->gpio_en)
                gpio_direction_output(pmic->gpio, 0);

        /* convert to uV for better accuracy in the calculations */
        offset *= 1000;

        offset_code = get_offset_code(offset, pmic);

        debug("do_scale_vcore: volt - %d offset_code - 0x%x\n", volt_mv,
                offset_code);

        if (pmic->pmic_write(pmic->i2c_slave_addr, vcore_reg, offset_code))
                printf("Scaling voltage failed for 0x%x\n", vcore_reg);
        if (pmic->gpio_en)
                gpio_direction_output(pmic->gpio, 1);
}

int __weak get_voltrail_opp(int rail_offset)
{
        /*
         * By default return OPP_NOM for all voltage rails.
         */
        return OPP_NOM;
}

static u32 optimize_vcore_voltage(struct volts const *v, int opp)
{
        u32 val;

        if (!v->value[opp])
                return 0;
        if (!v->efuse.reg[opp])
                return v->value[opp];

        switch (v->efuse.reg_bits) {
        case 16:
                val = readw(v->efuse.reg[opp]);
                break;
        case 32:
                val = readl(v->efuse.reg[opp]);
                break;
        default:
                printf("Error: efuse 0x%08x bits=%d unknown\n",
                       v->efuse.reg[opp], v->efuse.reg_bits);
                return v->value[opp];
        }

        if (!val) {
                printf("Error: efuse 0x%08x bits=%d val=0, using %d\n",
                       v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp]);
                return v->value[opp];
        }

        debug("%s:efuse 0x%08x bits=%d Vnom=%d, using efuse value %d\n",
              __func__, v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp],
              val);
        return val;
}

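/*
 * For example (values assumed for illustration): if a rail's nominal
 * table value is 1150 mV but the 16-bit efuse register for that OPP
 * reads 1090, optimize_vcore_voltage() returns 1090, and the rail is
 * later programmed to 1090 mV instead of the nominal 1150 mV.
 */
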
#ifdef CONFIG_IODELAY_RECALIBRATION
void __weak recalibrate_iodelay(void)
{
}
#endif

/*
 * Setup the voltages for the main SoC core power domains.
 * We start with the maximum voltages allowed here, as set in the corresponding
 * vcores_data struct, and then scale (usually down) to the fused values that
 * are retrieved from the SoC. The scaling happens only if the efuse.reg fields
 * are initialised.
 * Rail grouping is supported for the DRA7xx SoCs only, therefore the code is
 * compiled conditionally. Note that the new code writes the scaled (or zeroed)
 * values back to the vcores_data struct for eventual reuse. Zero values mean
 * that the corresponding rails are not controlled separately, and are not sent
 * to the PMIC.
 */
void scale_vcores(struct vcores_data const *vcores)
{
        int i, opp, j, ol;
        struct volts *pv = (struct volts *)vcores;
        struct volts *px;

        for (i = 0; i < sizeof(struct vcores_data) / sizeof(struct volts);
             i++) {
                opp = get_voltrail_opp(i);
                debug("%d -> ", pv->value[opp]);

                if (pv->value[opp]) {
                        /* Handle non-empty members only */
                        pv->value[opp] = optimize_vcore_voltage(pv, opp);
                        px = (struct volts *)vcores;
                        j = 0;
                        while (px < pv) {
                                /*
                                 * Scan already handled non-empty members to see
                                 * if we have a group and find the max voltage,
                                 * which is set to the first occurrence of the
                                 * particular SMPS; the other group voltages are
                                 * zeroed.
                                 */
                                ol = get_voltrail_opp(j);
                                if (px->value[ol] &&
                                    (pv->pmic->i2c_slave_addr ==
                                     px->pmic->i2c_slave_addr) &&
                                    (pv->addr == px->addr)) {
                                        /* Same PMIC, same SMPS */
                                        if (pv->value[opp] > px->value[ol])
                                                px->value[ol] = pv->value[opp];

                                        pv->value[opp] = 0;
                                }
                                px++;
                                j++;
                        }
                }
                debug("%d\n", pv->value[opp]);
                pv++;
        }

        opp = get_voltrail_opp(VOLT_CORE);
        debug("cor: %d\n", vcores->core.value[opp]);
        do_scale_vcore(vcores->core.addr, vcores->core.value[opp],
                       vcores->core.pmic);
        /*
         * IO delay recalibration should be done immediately after
         * adjusting AVS voltages for VDD_CORE_L.
         * Respective boards should call __recalibrate_iodelay()
         * with proper mux, virtual and manual mode configurations.
         */
#ifdef CONFIG_IODELAY_RECALIBRATION
        recalibrate_iodelay();
#endif

        opp = get_voltrail_opp(VOLT_MPU);
        debug("mpu: %d\n", vcores->mpu.value[opp]);
        do_scale_vcore(vcores->mpu.addr, vcores->mpu.value[opp],
                       vcores->mpu.pmic);
        /* Configure MPU ABB LDO after scale */
        abb_setup(vcores->mpu.efuse.reg[opp],
                  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
                  (*prcm)->prm_abbldo_mpu_setup,
                  (*prcm)->prm_abbldo_mpu_ctrl,
                  (*prcm)->prm_irqstatus_mpu_2,
                  vcores->mpu.abb_tx_done_mask,
                  OMAP_ABB_FAST_OPP);

        opp = get_voltrail_opp(VOLT_MM);
        debug("mm: %d\n", vcores->mm.value[opp]);
        do_scale_vcore(vcores->mm.addr, vcores->mm.value[opp],
                       vcores->mm.pmic);
        /* Configure MM ABB LDO after scale */
        abb_setup(vcores->mm.efuse.reg[opp],
                  (*ctrl)->control_wkup_ldovbb_mm_voltage_ctrl,
                  (*prcm)->prm_abbldo_mm_setup,
                  (*prcm)->prm_abbldo_mm_ctrl,
                  (*prcm)->prm_irqstatus_mpu,
                  vcores->mm.abb_tx_done_mask,
                  OMAP_ABB_FAST_OPP);

        opp = get_voltrail_opp(VOLT_GPU);
        debug("gpu: %d\n", vcores->gpu.value[opp]);
        do_scale_vcore(vcores->gpu.addr, vcores->gpu.value[opp],
                       vcores->gpu.pmic);
        /* Configure GPU ABB LDO after scale */
        abb_setup(vcores->gpu.efuse.reg[opp],
                  (*ctrl)->control_wkup_ldovbb_gpu_voltage_ctrl,
                  (*prcm)->prm_abbldo_gpu_setup,
                  (*prcm)->prm_abbldo_gpu_ctrl,
                  (*prcm)->prm_irqstatus_mpu,
                  vcores->gpu.abb_tx_done_mask,
                  OMAP_ABB_FAST_OPP);

        opp = get_voltrail_opp(VOLT_EVE);
        debug("eve: %d\n", vcores->eve.value[opp]);
        do_scale_vcore(vcores->eve.addr, vcores->eve.value[opp],
                       vcores->eve.pmic);
        /* Configure EVE ABB LDO after scale */
        abb_setup(vcores->eve.efuse.reg[opp],
                  (*ctrl)->control_wkup_ldovbb_eve_voltage_ctrl,
                  (*prcm)->prm_abbldo_eve_setup,
                  (*prcm)->prm_abbldo_eve_ctrl,
                  (*prcm)->prm_irqstatus_mpu,
                  vcores->eve.abb_tx_done_mask,
                  OMAP_ABB_FAST_OPP);

        opp = get_voltrail_opp(VOLT_IVA);
        debug("iva: %d\n", vcores->iva.value[opp]);
        do_scale_vcore(vcores->iva.addr, vcores->iva.value[opp],
                       vcores->iva.pmic);
        /* Configure IVA ABB LDO after scale */
        abb_setup(vcores->iva.efuse.reg[opp],
                  (*ctrl)->control_wkup_ldovbb_iva_voltage_ctrl,
                  (*prcm)->prm_abbldo_iva_setup,
                  (*prcm)->prm_abbldo_iva_ctrl,
                  (*prcm)->prm_irqstatus_mpu,
                  vcores->iva.abb_tx_done_mask,
                  OMAP_ABB_FAST_OPP);
}

static inline void enable_clock_domain(u32 const clkctrl_reg, u32 enable_mode)
{
        clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
                        enable_mode << CD_CLKCTRL_CLKTRCTRL_SHIFT);
        debug("Enable clock domain - %x\n", clkctrl_reg);
}

static inline void disable_clock_domain(u32 const clkctrl_reg)
{
        clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
                        CD_CLKCTRL_CLKTRCTRL_SW_SLEEP <<
                        CD_CLKCTRL_CLKTRCTRL_SHIFT);
        debug("Disable clock domain - %x\n", clkctrl_reg);
}

static inline void wait_for_clk_enable(u32 clkctrl_addr)
{
        u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_DISABLED;
        u32 bound = LDELAY;

        while ((idlest == MODULE_CLKCTRL_IDLEST_DISABLED) ||
                (idlest == MODULE_CLKCTRL_IDLEST_TRANSITIONING)) {
                clkctrl = readl(clkctrl_addr);
                idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
                         MODULE_CLKCTRL_IDLEST_SHIFT;
                if (--bound == 0) {
                        printf("Clock enable failed for 0x%x idlest 0x%x\n",
                                clkctrl_addr, clkctrl);
                        return;
                }
        }
}

static inline void enable_clock_module(u32 const clkctrl_addr, u32 enable_mode,
                                u32 wait_for_enable)
{
        clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
                        enable_mode << MODULE_CLKCTRL_MODULEMODE_SHIFT);
        debug("Enable clock module - %x\n", clkctrl_addr);
        if (wait_for_enable)
                wait_for_clk_enable(clkctrl_addr);
}

static inline void wait_for_clk_disable(u32 clkctrl_addr)
{
        u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_FULLY_FUNCTIONAL;
        u32 bound = LDELAY;

        while (idlest != MODULE_CLKCTRL_IDLEST_DISABLED) {
                clkctrl = readl(clkctrl_addr);
                idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
                         MODULE_CLKCTRL_IDLEST_SHIFT;
                if (--bound == 0) {
                        printf("Clock disable failed for 0x%x idlest 0x%x\n",
                               clkctrl_addr, clkctrl);
                        return;
                }
        }
}

static inline void disable_clock_module(u32 const clkctrl_addr,
                                        u32 wait_for_disable)
{
        clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_DISABLE <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);
        debug("Disable clock module - %x\n", clkctrl_addr);
        if (wait_for_disable)
                wait_for_clk_disable(clkctrl_addr);
}

void freq_update_core(void)
{
        u32 freq_config1 = 0;
        const struct dpll_params *core_dpll_params;
        u32 omap_rev = omap_revision();

        core_dpll_params = get_core_dpll_params(*dplls_data);
        /* Put EMIF clock domain in sw wakeup mode */
        enable_clock_domain((*prcm)->cm_memif_clkstctrl,
                                CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
        wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
        wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);

        freq_config1 = SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK |
            SHADOW_FREQ_CONFIG1_DLL_RESET_MASK;

        freq_config1 |= (DPLL_EN_LOCK << SHADOW_FREQ_CONFIG1_DPLL_EN_SHIFT) &
                                SHADOW_FREQ_CONFIG1_DPLL_EN_MASK;

        freq_config1 |= (core_dpll_params->m2 <<
                        SHADOW_FREQ_CONFIG1_M2_DIV_SHIFT) &
                        SHADOW_FREQ_CONFIG1_M2_DIV_MASK;

        writel(freq_config1, (*prcm)->cm_shadow_freq_config1);
        if (!wait_on_value(SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK, 0,
                        (u32 *) (*prcm)->cm_shadow_freq_config1, LDELAY)) {
                puts("FREQ UPDATE procedure failed!!");
                hang();
        }

        /*
         * Putting EMIF in HW_AUTO is seen to be causing issues with
         * EMIF clocks and the master DLL. Keep EMIF in SW_WKUP
         * in OMAP5430 ES1.0 silicon
         */
        if (omap_rev != OMAP5430_ES1_0) {
                /* Put EMIF clock domain back in hw auto mode */
                enable_clock_domain((*prcm)->cm_memif_clkstctrl,
                                        CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
                wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
                wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);
        }
}

void bypass_dpll(u32 const base)
{
        do_bypass_dpll(base);
        wait_for_bypass(base);
}

void lock_dpll(u32 const base)
{
        do_lock_dpll(base);
        wait_for_lock(base);
}

static void setup_clocks_for_console(void)
{
        /* Do not add any spl_debug prints in this function */
        clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
                        CD_CLKCTRL_CLKTRCTRL_SW_WKUP <<
                        CD_CLKCTRL_CLKTRCTRL_SHIFT);

        /* Enable all UARTs - console will be on one of them */
        clrsetbits_le32((*prcm)->cm_l4per_uart1_clkctrl,
                        MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);

        clrsetbits_le32((*prcm)->cm_l4per_uart2_clkctrl,
                        MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);

        clrsetbits_le32((*prcm)->cm_l4per_uart3_clkctrl,
                        MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);

        clrsetbits_le32((*prcm)->cm_l4per_uart4_clkctrl,
                        MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);

        clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
                        CD_CLKCTRL_CLKTRCTRL_HW_AUTO <<
                        CD_CLKCTRL_CLKTRCTRL_SHIFT);
}

void do_enable_clocks(u32 const *clk_domains,
                            u32 const *clk_modules_hw_auto,
                            u32 const *clk_modules_explicit_en,
                            u8 wait_for_enable)
{
        u32 i, max = 100;

        /* Put the clock domains in SW_WKUP mode */
        for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
                enable_clock_domain(clk_domains[i],
                                    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
        }

        /* Clock modules that need to be put in HW_AUTO */
        for (i = 0; (i < max) && clk_modules_hw_auto &&
                     clk_modules_hw_auto[i]; i++) {
                enable_clock_module(clk_modules_hw_auto[i],
                                    MODULE_CLKCTRL_MODULEMODE_HW_AUTO,
                                    wait_for_enable);
        }

        /* Clock modules that need to be put in SW_EXPLICIT_EN mode */
        for (i = 0; (i < max) && clk_modules_explicit_en &&
                     clk_modules_explicit_en[i]; i++) {
                enable_clock_module(clk_modules_explicit_en[i],
                                    MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN,
                                    wait_for_enable);
        }

        /* Put the clock domains in HW_AUTO mode now */
        for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
                enable_clock_domain(clk_domains[i],
                                    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
        }
}

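/*
 * Illustrative (hypothetical) usage of do_enable_clocks(): callers pass
 * zero-terminated lists of CM_* register addresses, and any list that is
 * not needed can be NULL. The array names below are made up for the
 * example only.
 *
 *   u32 const clk_domains_example[] = {
 *           (*prcm)->cm_l4per_clkstctrl,
 *           0
 *   };
 *   u32 const clk_modules_explicit_en_example[] = {
 *           (*prcm)->cm_l4per_uart3_clkctrl,
 *           0
 *   };
 *   do_enable_clocks(clk_domains_example, NULL,
 *                    clk_modules_explicit_en_example, 1);
 */
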
void do_disable_clocks(u32 const *clk_domains,
                            u32 const *clk_modules_disable,
                            u8 wait_for_disable)
{
        u32 i, max = 100;

        /* Clock modules that need to be put in SW_DISABLE */
        for (i = 0; (i < max) && clk_modules_disable[i]; i++)
                disable_clock_module(clk_modules_disable[i],
                                     wait_for_disable);

        /* Put the clock domains in SW_SLEEP mode */
        for (i = 0; (i < max) && clk_domains[i]; i++)
                disable_clock_domain(clk_domains[i]);
}

/**
 * setup_early_clocks() - Setup early clocks needed for SoC
 *
 * Setup clocks for console, SPL basic initialization clocks and initialize
 * the timer. This is invoked prior to prcm_init.
 */
void setup_early_clocks(void)
{
        switch (omap_hw_init_context()) {
        case OMAP_INIT_CONTEXT_SPL:
        case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
        case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
                setup_clocks_for_console();
                enable_basic_clocks();
                timer_init();
                /* Fall through */
        }
}

void prcm_init(void)
{
        switch (omap_hw_init_context()) {
        case OMAP_INIT_CONTEXT_SPL:
        case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
        case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
                scale_vcores(*omap_vcores);
                setup_dplls();
                setup_warmreset_time();
                break;
        default:
                break;
        }

        if (OMAP_INIT_CONTEXT_SPL != omap_hw_init_context())
                enable_basic_uboot_clocks();
}

#if !defined(CONFIG_DM_I2C)
void gpi2c_init(void)
{
        static int gpi2c = 1;

        if (gpi2c) {
                i2c_init(CONFIG_SYS_OMAP24_I2C_SPEED,
                         CONFIG_SYS_OMAP24_I2C_SLAVE);
                gpi2c = 0;
        }
}
#endif