board_f: Drop setup_dram_config() wrapper
arch/arm/cpu/armv8/fsl-layerscape/cpu.c (oweals/u-boot.git)
1 /*
2  * Copyright 2014-2015 Freescale Semiconductor, Inc.
3  *
4  * SPDX-License-Identifier:     GPL-2.0+
5  */
6
7 #include <common.h>
8 #include <asm/io.h>
9 #include <linux/errno.h>
10 #include <asm/system.h>
11 #include <asm/armv8/mmu.h>
12 #include <asm/io.h>
13 #include <asm/arch/fsl_serdes.h>
14 #include <asm/arch/soc.h>
15 #include <asm/arch/cpu.h>
16 #include <asm/arch/speed.h>
17 #ifdef CONFIG_MP
18 #include <asm/arch/mp.h>
19 #endif
20 #include <efi_loader.h>
21 #include <fm_eth.h>
22 #include <fsl-mc/fsl_mc.h>
23 #ifdef CONFIG_FSL_ESDHC
24 #include <fsl_esdhc.h>
25 #endif
26 #ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
27 #include <asm/armv8/sec_firmware.h>
28 #endif
29 #ifdef CONFIG_SYS_FSL_DDR
30 #include <fsl_ddr.h>
31 #endif
32
33 DECLARE_GLOBAL_DATA_PTR;
34
35 struct mm_region *mem_map = early_map;
36
37 void cpu_name(char *name)
38 {
39         struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
40         unsigned int i, svr, ver;
41
42         svr = gur_in32(&gur->svr);
43         ver = SVR_SOC_VER(svr);
44
45         for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
46                 if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
47                         strcpy(name, cpu_type_list[i].name);
48
49                         if (IS_E_PROCESSOR(svr))
50                                 strcat(name, "E");
51
52                         sprintf(name + strlen(name), " Rev%d.%d",
53                                 SVR_MAJ(svr), SVR_MIN(svr));
54                         break;
55                 }
56
57         if (i == ARRAY_SIZE(cpu_type_list))
58                 strcpy(name, "unknown");
59 }
60
61 #ifndef CONFIG_SYS_DCACHE_OFF
62 /*
63  * To start MMU before DDR is available, we create MMU table in SRAM.
64  * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
65  * levels of translation tables here to cover the 40-bit address space.
66  * We use a 4KB granule size with 40-bit physical addresses and T0SZ=24.
67  * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
68  * Note, the debug print in cache_v8.c is not usable for debugging
69  * these early MMU tables because UART is not yet available.
70  */
71 static inline void early_mmu_setup(void)
72 {
73         unsigned int el = current_el();
74
75         /* global data is already setup, no allocation yet */
76         gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
77         gd->arch.tlb_fillptr = gd->arch.tlb_addr;
78         gd->arch.tlb_size = EARLY_PGTABLE_SIZE;
79
80         /* Create early page tables */
81         setup_pgtables();
82
83         /* point TTBR to the new table */
84         set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
85                           get_tcr(el, NULL, NULL) &
86                           ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
87                           MEMORY_ATTRIBUTES);
88
89         set_sctlr(get_sctlr() | CR_M);
90 }
91
92 static void fix_pcie_mmu_map(void)
93 {
94 #ifdef CONFIG_LS2080A
95         unsigned int i;
96         u32 svr, ver;
97         struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
98
99         svr = gur_in32(&gur->svr);
100         ver = SVR_SOC_VER(svr);
101
102         /* Fix PCIE base and size for LS2088A */
103         if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
104             (ver == SVR_LS2048A) || (ver == SVR_LS2044A)) {
105                 for (i = 0; i < ARRAY_SIZE(final_map); i++) {
106                         switch (final_map[i].phys) {
107                         case CONFIG_SYS_PCIE1_PHYS_ADDR:
108                                 final_map[i].phys = 0x2000000000ULL;
109                                 final_map[i].virt = 0x2000000000ULL;
110                                 final_map[i].size = 0x800000000ULL;
111                                 break;
112                         case CONFIG_SYS_PCIE2_PHYS_ADDR:
113                                 final_map[i].phys = 0x2800000000ULL;
114                                 final_map[i].virt = 0x2800000000ULL;
115                                 final_map[i].size = 0x800000000ULL;
116                                 break;
117                         case CONFIG_SYS_PCIE3_PHYS_ADDR:
118                                 final_map[i].phys = 0x3000000000ULL;
119                                 final_map[i].virt = 0x3000000000ULL;
120                                 final_map[i].size = 0x800000000ULL;
121                                 break;
122                         case CONFIG_SYS_PCIE4_PHYS_ADDR:
123                                 final_map[i].phys = 0x3800000000ULL;
124                                 final_map[i].virt = 0x3800000000ULL;
125                                 final_map[i].size = 0x800000000ULL;
126                                 break;
127                         default:
128                                 break;
129                         }
130                 }
131         }
132 #endif
133 }
134
135 /*
136  * The final tables look similar to the early tables but differ in detail.
137  * These tables are in DRAM. Sub tables are added to enable cache for
138  * QBMan and OCRAM.
139  *
140  * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
141  * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
142  */
143 static inline void final_mmu_setup(void)
144 {
145         u64 tlb_addr_save = gd->arch.tlb_addr;
146         unsigned int el = current_el();
147         int index;
148
149         /* fix the final_map before filling in the block entries */
150         fix_pcie_mmu_map();
151
152         mem_map = final_map;
153
154         /* Update mapping for DDR to actual size */
155         for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
156                 /*
157                  * Find the entry for DDR mapping and update the address and
158                  * size. Zero-sized mapping will be skipped when creating MMU
159                  * table.
160                  */
161                 switch (final_map[index].virt) {
162                 case CONFIG_SYS_FSL_DRAM_BASE1:
163                         final_map[index].virt = gd->bd->bi_dram[0].start;
164                         final_map[index].phys = gd->bd->bi_dram[0].start;
165                         final_map[index].size = gd->bd->bi_dram[0].size;
166                         break;
167 #ifdef CONFIG_SYS_FSL_DRAM_BASE2
168                 case CONFIG_SYS_FSL_DRAM_BASE2:
169 #if (CONFIG_NR_DRAM_BANKS >= 2)
170                         final_map[index].virt = gd->bd->bi_dram[1].start;
171                         final_map[index].phys = gd->bd->bi_dram[1].start;
172                         final_map[index].size = gd->bd->bi_dram[1].size;
173 #else
174                         final_map[index].size = 0;
175 #endif
176                 break;
177 #endif
178 #ifdef CONFIG_SYS_FSL_DRAM_BASE3
179                 case CONFIG_SYS_FSL_DRAM_BASE3:
180 #if (CONFIG_NR_DRAM_BANKS >= 3)
181                         final_map[index].virt = gd->bd->bi_dram[2].start;
182                         final_map[index].phys = gd->bd->bi_dram[2].start;
183                         final_map[index].size = gd->bd->bi_dram[2].size;
184 #else
185                         final_map[index].size = 0;
186 #endif
187                 break;
188 #endif
189                 default:
190                         break;
191                 }
192         }
193
194 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
195         if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
196                 if (el == 3) {
197                         /*
198                          * Only use gd->arch.secure_ram if the address is
199                          * recalculated. Align to 4KB for MMU table.
200                          */
201                         /* put page tables in secure ram */
202                         index = ARRAY_SIZE(final_map) - 2;
203                         gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
204                         final_map[index].virt = gd->arch.secure_ram & ~0x3;
205                         final_map[index].phys = final_map[index].virt;
206                         final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
207                         final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
208                         gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
209                         tlb_addr_save = gd->arch.tlb_addr;
210                 } else {
211                         /* Use allocated (board_f.c) memory for TLB */
212                         tlb_addr_save = gd->arch.tlb_allocated;
213                         gd->arch.tlb_addr = tlb_addr_save;
214                 }
215         }
216 #endif
217
218         /* Reset the fill ptr */
219         gd->arch.tlb_fillptr = tlb_addr_save;
220
221         /* Create normal system page tables */
222         setup_pgtables();
223
224         /* Create emergency page tables */
225         gd->arch.tlb_addr = gd->arch.tlb_fillptr;
226         gd->arch.tlb_emerg = gd->arch.tlb_addr;
227         setup_pgtables();
228         gd->arch.tlb_addr = tlb_addr_save;
229
230         /* Disable cache and MMU */
231         dcache_disable();       /* TLBs are invalidated */
232         invalidate_icache_all();
233
234         /* point TTBR to the new table */
235         set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
236                           MEMORY_ATTRIBUTES);
237
238         set_sctlr(get_sctlr() | CR_M);
239 }
240
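/*
 * Fixed 64 KiB (0x10000) budget for the MMU tables; the normal and emergency
 * table sets built in final_mmu_setup() are carved out of this allocation.
 */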
241 u64 get_page_table_size(void)
242 {
243         return 0x10000;
244 }
245
246 int arch_cpu_init(void)
247 {
248         icache_enable();
249         __asm_invalidate_dcache_all();
250         __asm_invalidate_tlb_all();
251         early_mmu_setup();
252         set_sctlr(get_sctlr() | CR_C);
253         return 0;
254 }
255
256 void mmu_setup(void)
257 {
258         final_mmu_setup();
259 }
260
261 /*
262  * This function is called from common/board_r.c.
263  * It recreates MMU table in main memory.
264  */
265 void enable_caches(void)
266 {
267         mmu_setup();
268         __asm_invalidate_tlb_all();
269         icache_enable();
270         dcache_enable();
271 }
272 #endif
273
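/*
 * Look up the type of initiator @init_id within a topology cluster, using the
 * index encoded in the cluster register to read the matching TP_ITYP entry.
 * Returns the TP_ITYP value if the initiator is available, 0 otherwise.
 */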
274 u32 initiator_type(u32 cluster, int init_id)
275 {
276         struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
277         u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
278         u32 type = 0;
279
280         type = gur_in32(&gur->tp_ityp[idx]);
281         if (type & TP_ITYP_AV)
282                 return type;
283
284         return 0;
285 }
286
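/*
 * Return a bitmask of the ARM cores present, indexed by physical position
 * (cluster number * TP_INIT_PER_CLUSTER + slot within the cluster).
 */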
287 u32 cpu_pos_mask(void)
288 {
289         struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
290         int i = 0;
291         u32 cluster, type, mask = 0;
292
293         do {
294                 int j;
295
296                 cluster = gur_in32(&gur->tp_cluster[i].lower);
297                 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
298                         type = initiator_type(cluster, j);
299                         if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
300                                 mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
301                 }
302                 i++;
303         } while ((cluster & TP_CLUSTER_EOC) == 0x0);
304
305         return mask;
306 }
307
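/*
 * Return a bitmask of the ARM cores present, numbered sequentially across all
 * available initiators (the same numbering used by fsl_qoriq_core_to_cluster()
 * and fsl_qoriq_core_to_type() below).
 */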
308 u32 cpu_mask(void)
309 {
310         struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
311         int i = 0, count = 0;
312         u32 cluster, type, mask = 0;
313
314         do {
315                 int j;
316
317                 cluster = gur_in32(&gur->tp_cluster[i].lower);
318                 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
319                         type = initiator_type(cluster, j);
320                         if (type) {
321                                 if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
322                                         mask |= 1 << count;
323                                 count++;
324                         }
325                 }
326                 i++;
327         } while ((cluster & TP_CLUSTER_EOC) == 0x0);
328
329         return mask;
330 }
331
332 /*
333  * Return the number of cores on this SOC.
334  */
335 int cpu_numcores(void)
336 {
337         return hweight32(cpu_mask());
338 }
339
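/*
 * Map a sequential core number to the index of the cluster containing it.
 * Returns -1 if no matching initiator is found.
 */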
340 int fsl_qoriq_core_to_cluster(unsigned int core)
341 {
342         struct ccsr_gur __iomem *gur =
343                 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
344         int i = 0, count = 0;
345         u32 cluster;
346
347         do {
348                 int j;
349
350                 cluster = gur_in32(&gur->tp_cluster[i].lower);
351                 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
352                         if (initiator_type(cluster, j)) {
353                                 if (count == core)
354                                         return i;
355                                 count++;
356                         }
357                 }
358                 i++;
359         } while ((cluster & TP_CLUSTER_EOC) == 0x0);
360
361         return -1;      /* cannot identify the cluster */
362 }
363
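/*
 * Map a sequential core number to its TP_ITYP initiator type value.
 * Returns (u32)-1 if no matching initiator is found.
 */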
364 u32 fsl_qoriq_core_to_type(unsigned int core)
365 {
366         struct ccsr_gur __iomem *gur =
367                 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
368         int i = 0, count = 0;
369         u32 cluster, type;
370
371         do {
372                 int j;
373
374                 cluster = gur_in32(&gur->tp_cluster[i].lower);
375                 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
376                         type = initiator_type(cluster, j);
377                         if (type) {
378                                 if (count == core)
379                                         return type;
380                                 count++;
381                         }
382                 }
383                 i++;
384         } while ((cluster & TP_CLUSTER_EOC) == 0x0);
385
386         return -1;      /* cannot identify the core type */
387 }
388
389 #ifndef CONFIG_FSL_LSCH3
390 uint get_svr(void)
391 {
392         struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
393
394         return gur_in32(&gur->svr);
395 }
396 #endif
397
398 #ifdef CONFIG_DISPLAY_CPUINFO
399 int print_cpuinfo(void)
400 {
401         struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
402         struct sys_info sysinfo;
403         char buf[32];
404         unsigned int i, core;
405         u32 type, rcw, svr = gur_in32(&gur->svr);
406
407         puts("SoC: ");
408
409         cpu_name(buf);
410         printf(" %s (0x%x)\n", buf, svr);
411         memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
412         get_sys_info(&sysinfo);
413         puts("Clock Configuration:");
414         for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
415                 if (!(i % 3))
416                         puts("\n       ");
417                 type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
418                 printf("CPU%d(%s):%-4s MHz  ", core,
419                        type == TY_ITYP_VER_A7 ? "A7 " :
420                        (type == TY_ITYP_VER_A53 ? "A53" :
421                        (type == TY_ITYP_VER_A57 ? "A57" :
422                        (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
423                        strmhz(buf, sysinfo.freq_processor[core]));
424         }
425         /* Display platform clock as Bus frequency. */
426         printf("\n       Bus:      %-4s MHz  ",
427                strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
428         printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
429 #ifdef CONFIG_SYS_DPAA_FMAN
430         printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
431 #endif
432 #ifdef CONFIG_SYS_FSL_HAS_DP_DDR
433         if (soc_has_dp_ddr()) {
434                 printf("     DP-DDR:   %-4s MT/s",
435                        strmhz(buf, sysinfo.freq_ddrbus2));
436         }
437 #endif
438         puts("\n");
439
440         /*
441          * Display the RCW, so that no one gets confused as to what RCW
442          * we're actually using for this boot.
443          */
444         puts("Reset Configuration Word (RCW):");
445         for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
446                 rcw = gur_in32(&gur->rcwsr[i]);
447                 if ((i % 4) == 0)
448                         printf("\n       %08x:", i * 4);
449                 printf(" %08x", rcw);
450         }
451         puts("\n");
452
453         return 0;
454 }
455 #endif
456
457 #ifdef CONFIG_FSL_ESDHC
458 int cpu_mmc_init(bd_t *bis)
459 {
460         return fsl_esdhc_mmc_init(bis);
461 }
462 #endif
463
464 int cpu_eth_init(bd_t *bis)
465 {
466         int error = 0;
467
468 #ifdef CONFIG_FSL_MC_ENET
469         error = fsl_mc_ldpaa_init(bis);
470 #endif
471 #ifdef CONFIG_FMAN_ENET
472         fm_standard_init(bis);
473 #endif
474         return error;
475 }
476
477 int arch_early_init_r(void)
478 {
479 #ifdef CONFIG_MP
480         int rv = 1;
481         u32 psci_ver = 0xffffffff;
482 #endif
483
484 #ifdef CONFIG_SYS_FSL_ERRATUM_A009635
485         u32 svr_dev_id;
486         /*
487          * erratum A009635 is valid only for LS2080A SoC and
488  * its personalities
489          */
490         svr_dev_id = get_svr() >> 16;
491         if (svr_dev_id == SVR_DEV_LS2080A)
492                 erratum_a009635();
493 #endif
494 #if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
495         erratum_a009942_check_cpo();
496 #endif
497 #ifdef CONFIG_MP
498 #if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && \
499         defined(CONFIG_SEC_FIRMWARE_ARMV8_PSCI)
500         /* Check the PSCI version to determine if PSCI is supported */
501         psci_ver = sec_firmware_support_psci_version();
502 #endif
503         if (psci_ver == 0xffffffff) {
504                 rv = fsl_layerscape_wake_seconday_cores();
505                 if (rv)
506                         printf("Did not wake secondary cores\n");
507         }
508 #endif
509
510 #ifdef CONFIG_SYS_HAS_SERDES
511         fsl_serdes_init();
512 #endif
513 #ifdef CONFIG_FMAN_ENET
514         fman_enet_init();
515 #endif
516         return 0;
517 }
518
519 int timer_init(void)
520 {
521         u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
522 #ifdef CONFIG_FSL_LSCH3
523         u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
524 #endif
525 #ifdef CONFIG_LS2080A
526         u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
527         u32 svr_dev_id;
528 #endif
529 #ifdef COUNTER_FREQUENCY_REAL
530         unsigned long cntfrq = COUNTER_FREQUENCY_REAL;
531
532         /* Update with accurate clock frequency */
533         asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
534 #endif
535
536 #ifdef CONFIG_FSL_LSCH3
537         /* Enable timebase for all clusters.
538          * It is safe to do so even if some clusters are not enabled.
539          */
540         out_le32(cltbenr, 0xf);
541 #endif
542
543 #ifdef CONFIG_LS2080A
544         /*
545          * In certain Layerscape SoCs, the clock for each core's watchdog
546          * has an enable bit in the PMU Physical Core Time Base Enable
547          * Register (PCTBENR), which allows the watchdog to operate.
548          */
549         setbits_le32(pctbenr, 0xff);
550         /*
551          * For LS2080A SoC and its personalities, the timer controller
552          * offset is different.
553          */
554         svr_dev_id = get_svr() >> 16;
555         if (svr_dev_id == SVR_DEV_LS2080A)
556                 cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
557
558 #endif
559
560         /* Enable clock for timer
561          * This is a global setting.
562          */
563         out_le32(cntcr, 0x1);
564
565         return 0;
566 }
567
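/*
 * Reset control register. It is kept in __efi_runtime_data and registered as
 * runtime MMIO in efi_reset_system_init() so that efi_reset_system() can
 * still reach it at runtime.
 */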
568 __efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
569
570 void __efi_runtime reset_cpu(ulong addr)
571 {
572         u32 val;
573
574         /* Raise RESET_REQ_B */
575         val = scfg_in32(rstcr);
576         val |= 0x02;
577         scfg_out32(rstcr, val);
578 }
579
580 #ifdef CONFIG_EFI_LOADER
581
582 void __efi_runtime EFIAPI efi_reset_system(
583                        enum efi_reset_type reset_type,
584                        efi_status_t reset_status,
585                        unsigned long data_size, void *reset_data)
586 {
587         switch (reset_type) {
588         case EFI_RESET_COLD:
589         case EFI_RESET_WARM:
590                 reset_cpu(0);
591                 break;
592         case EFI_RESET_SHUTDOWN:
593                 /* Nothing we can do */
594                 break;
595         }
596
597         while (1) { }
598 }
599
600 void efi_reset_system_init(void)
601 {
602        efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
603 }
604
605 #endif
606
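/*
 * Return how much memory to reserve at the top of RAM. With the DPAA2
 * Management Complex enabled, this is its DRAM block size rounded up to the
 * required alignment.
 */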
607 phys_size_t board_reserve_ram_top(phys_size_t ram_size)
608 {
609         phys_size_t ram_top = ram_size;
610
611 #ifdef CONFIG_FSL_MC_ENET
612         /* The start address of MC reserved memory needs to be aligned. */
613         ram_top -= mc_get_dram_block_size();
614         ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
615 #endif
616
617         return ram_size - ram_top;
618 }
619
620 phys_size_t get_effective_memsize(void)
621 {
622         phys_size_t ea_size, rem = 0;
623
624         /*
625          * For ARMv8 SoCs, DDR memory is split into two or three regions. The
626          * first region is 2GB space at 0x8000_0000. If the memory extends to
627          * the second region (or the third region if applicable), the secure
628          * memory and Management Complex (MC) memory should be put into the
629          * highest region, i.e. the end of DDR memory. CONFIG_MAX_MEM_MAPPED
630          * is set to the size of the first region so U-Boot doesn't relocate itself
631          * into a higher address. Should DDR be configured to skip the first
632          * region, this function needs to be adjusted.
633          */
634         if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
635                 ea_size = CONFIG_MAX_MEM_MAPPED;
636                 rem = gd->ram_size - ea_size;
637         } else {
638                 ea_size = gd->ram_size;
639         }
640
641 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
642         /* Check if we have enough space for secure memory */
643         if (rem > CONFIG_SYS_MEM_RESERVE_SECURE) {
644                 rem -= CONFIG_SYS_MEM_RESERVE_SECURE;
645         } else {
646                 if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE) {
647                         ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
648                         rem = 0;        /* Presume MC requires more memory */
649                 } else {
650                         printf("Error: Not enough space for secure memory.\n");
651                 }
652         }
653 #endif
654         /* Check if we have enough memory for MC */
655         if (rem < board_reserve_ram_top(rem)) {
656                 /* Not enough memory in high region to reserve */
657                 if (ea_size > board_reserve_ram_top(rem))
658                         ea_size -= board_reserve_ram_top(rem);
659                 else
660                         printf("Error: Not enough space for reserved memory.\n");
661         }
662
663         return ea_size;
664 }
665
666 int dram_init_banksize(void)
667 {
668 #ifdef CONFIG_SYS_DP_DDR_BASE_PHY
669         phys_size_t dp_ddr_size;
670 #endif
671
672         /*
673          * gd->ram_size has the total size of DDR memory, less reserved secure
674          * memory. The DDR extends from the low region to the high region(s),
675          * presuming the DDR configuration creates no holes. gd->arch.secure_ram tracks
676          * the location of secure memory. gd->arch.resv_ram tracks the location
677          * of reserved memory for Management Complex (MC).
678          */
679         gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
680         if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
681                 gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
682                 gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
683                 gd->bd->bi_dram[1].size = gd->ram_size -
684                                           CONFIG_SYS_DDR_BLOCK1_SIZE;
685 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
686                 if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
687                         gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
688                         gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
689                                                   CONFIG_SYS_DDR_BLOCK2_SIZE;
690                         gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
691                 }
692 #endif
693         } else {
694                 gd->bd->bi_dram[0].size = gd->ram_size;
695         }
696 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
697 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
698         if (gd->bd->bi_dram[2].size >= CONFIG_SYS_MEM_RESERVE_SECURE) {
699                 gd->bd->bi_dram[2].size -= CONFIG_SYS_MEM_RESERVE_SECURE;
700                 gd->arch.secure_ram = gd->bd->bi_dram[2].start +
701                                       gd->bd->bi_dram[2].size;
702                 gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
703                 gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
704         } else
705 #endif
706         {
707                 if (gd->bd->bi_dram[1].size >= CONFIG_SYS_MEM_RESERVE_SECURE) {
708                         gd->bd->bi_dram[1].size -=
709                                         CONFIG_SYS_MEM_RESERVE_SECURE;
710                         gd->arch.secure_ram = gd->bd->bi_dram[1].start +
711                                               gd->bd->bi_dram[1].size;
712                         gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
713                         gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
714                 } else if (gd->bd->bi_dram[0].size >
715                                         CONFIG_SYS_MEM_RESERVE_SECURE) {
716                         gd->bd->bi_dram[0].size -=
717                                         CONFIG_SYS_MEM_RESERVE_SECURE;
718                         gd->arch.secure_ram = gd->bd->bi_dram[0].start +
719                                               gd->bd->bi_dram[0].size;
720                         gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
721                         gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
722                 }
723         }
724 #endif  /* CONFIG_SYS_MEM_RESERVE_SECURE */
725
726 #ifdef CONFIG_FSL_MC_ENET
727         /* Assign memory for MC */
728 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
729         if (gd->bd->bi_dram[2].size >=
730             board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
731                 gd->arch.resv_ram = gd->bd->bi_dram[2].start +
732                             gd->bd->bi_dram[2].size -
733                             board_reserve_ram_top(gd->bd->bi_dram[2].size);
734         } else
735 #endif
736         {
737                 if (gd->bd->bi_dram[1].size >=
738                     board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
739                         gd->arch.resv_ram = gd->bd->bi_dram[1].start +
740                                 gd->bd->bi_dram[1].size -
741                                 board_reserve_ram_top(gd->bd->bi_dram[1].size);
742                 } else if (gd->bd->bi_dram[0].size >
743                            board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
744                         gd->arch.resv_ram = gd->bd->bi_dram[0].start +
745                                 gd->bd->bi_dram[0].size -
746                                 board_reserve_ram_top(gd->bd->bi_dram[0].size);
747                 }
748         }
749 #endif  /* CONFIG_FSL_MC_ENET */
750
751 #ifdef CONFIG_SYS_DP_DDR_BASE_PHY
752 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
753 #error "This SoC shouldn't have DP DDR"
754 #endif
755         if (soc_has_dp_ddr()) {
756                 /* initialize DP-DDR here */
757                 puts("DP-DDR:  ");
758                 /*
759                  * The DDR controller uses 0 as the base address for binding.
760                  * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to access.
761                  */
762                 dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
763                                           CONFIG_DP_DDR_CTRL,
764                                           CONFIG_DP_DDR_NUM_CTRLS,
765                                           CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
766                                           NULL, NULL, NULL);
767                 if (dp_ddr_size) {
768                         gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
769                         gd->bd->bi_dram[2].size = dp_ddr_size;
770                 } else {
771                         puts("Not detected");
772                 }
773         }
774 #endif
775
776         return 0;
777 }
778
779 #if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_SPL_BUILD)
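/*
 * Register the DRAM banks with the EFI memory map, trimming the region
 * reserved for the Management Complex and skipping the DP-DDR bank.
 */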
780 void efi_add_known_memory(void)
781 {
782         int i;
783         phys_addr_t ram_start, start;
784         phys_size_t ram_size;
785         u64 pages;
786
787         /* Add RAM */
788         for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
789 #ifdef CONFIG_SYS_DP_DDR_BASE_PHY
790 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
791 #error "This SoC shouldn't have DP DDR"
792 #endif
793                 if (i == 2)
794                         continue;       /* skip DP-DDR */
795 #endif
796                 ram_start = gd->bd->bi_dram[i].start;
797                 ram_size = gd->bd->bi_dram[i].size;
798 #ifdef CONFIG_RESV_RAM
799                 if (gd->arch.resv_ram >= ram_start &&
800                     gd->arch.resv_ram < ram_start + ram_size)
801                         ram_size = gd->arch.resv_ram - ram_start;
802 #endif
803                 start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
804                 pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;
805
806                 efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
807                                    false);
808         }
809 }
810 #endif
811
812 /*
813  * Before the DDR size is known, the early MMU table has DDR mapped as device memory
814  * to avoid speculative access. To relocate U-Boot to DDR, "normal memory"
815  * needs to be set for these mappings.
816  * If a special case configures DDR with holes in the mapping, the holes need
817  * to be marked as invalid. This is not implemented in this function.
818  */
819 void update_early_mmu_table(void)
820 {
821         if (!gd->arch.tlb_addr)
822                 return;
823
824         if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
825                 mmu_change_region_attr(
826                                         CONFIG_SYS_SDRAM_BASE,
827                                         gd->ram_size,
828                                         PTE_BLOCK_MEMTYPE(MT_NORMAL)    |
829                                         PTE_BLOCK_OUTER_SHARE           |
830                                         PTE_BLOCK_NS                    |
831                                         PTE_TYPE_VALID);
832         } else {
833                 mmu_change_region_attr(
834                                         CONFIG_SYS_SDRAM_BASE,
835                                         CONFIG_SYS_DDR_BLOCK1_SIZE,
836                                         PTE_BLOCK_MEMTYPE(MT_NORMAL)    |
837                                         PTE_BLOCK_OUTER_SHARE           |
838                                         PTE_BLOCK_NS                    |
839                                         PTE_TYPE_VALID);
840 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
841 #ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
842 #error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
843 #endif
844                 if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
845                     CONFIG_SYS_DDR_BLOCK2_SIZE) {
846                         mmu_change_region_attr(
847                                         CONFIG_SYS_DDR_BLOCK2_BASE,
848                                         CONFIG_SYS_DDR_BLOCK2_SIZE,
849                                         PTE_BLOCK_MEMTYPE(MT_NORMAL)    |
850                                         PTE_BLOCK_OUTER_SHARE           |
851                                         PTE_BLOCK_NS                    |
852                                         PTE_TYPE_VALID);
853                         mmu_change_region_attr(
854                                         CONFIG_SYS_DDR_BLOCK3_BASE,
855                                         gd->ram_size -
856                                         CONFIG_SYS_DDR_BLOCK1_SIZE -
857                                         CONFIG_SYS_DDR_BLOCK2_SIZE,
858                                         PTE_BLOCK_MEMTYPE(MT_NORMAL)    |
859                                         PTE_BLOCK_OUTER_SHARE           |
860                                         PTE_BLOCK_NS                    |
861                                         PTE_TYPE_VALID);
862                 } else
863 #endif
864                 {
865                         mmu_change_region_attr(
866                                         CONFIG_SYS_DDR_BLOCK2_BASE,
867                                         gd->ram_size -
868                                         CONFIG_SYS_DDR_BLOCK1_SIZE,
869                                         PTE_BLOCK_MEMTYPE(MT_NORMAL)    |
870                                         PTE_BLOCK_OUTER_SHARE           |
871                                         PTE_BLOCK_NS                    |
872                                         PTE_TYPE_VALID);
873                 }
874         }
875 }
876
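/*
 * Weak default so boards can override it: size DDR via initdram() and, where
 * the build configuration allows, remap DDR from device memory to normal
 * memory in the early MMU table.
 */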
877 __weak int dram_init(void)
878 {
879         initdram();
880 #if !defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD)
881         /* This will break-before-make MMU for DDR */
882         update_early_mmu_table();
883 #endif
884
885         return 0;
886 }