/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <fsl_ddr_sdram.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fm_eth.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif

DECLARE_GLOBAL_DATA_PTR;
struct mm_region *mem_map = early_map;
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}
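
/*
 * SVR_SOC_VER() compares the SVR with the E bit masked off (SVR_WO_E),
 * so E and non-E personalities match the same cpu_type_list entry;
 * IS_E_PROCESSOR() restores the suffix in the printed name and
 * SVR_MAJ()/SVR_MIN() supply the "RevX.Y" silicon revision.
 */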
#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start MMU before DDR is available, we create MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover 40-bit address space.
 * We use 4KB granule size, with 40 bits physical address, T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* Point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
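
/*
 * A quick size check on the settings above: T0SZ = 24 gives a
 * 2^(64 - 24) = 1 TB translation range, which the 4KB granule walks in
 * the three table levels mentioned above, and EARLY_PGTABLE_SIZE
 * (0x5000) corresponds to five 4KB tables reserved at the base of OCRAM.
 */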
static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIe base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			default:
				break;
			}
		}
	}
#endif
}
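
/*
 * Each of the rewritten PCIe windows above is 0x8_0000_0000 bytes
 * (32 GB); the four windows are packed back to back from
 * 0x20_0000_0000, so the remapped region ends just below
 * 0x40_0000_0000.
 */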
/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		/*
		 * Only use gd->arch.secure_ram if the address is
		 * recalculated. Align to 4KB for MMU table.
		 */
		/* put page tables in secure ram */
		index = ARRAY_SIZE(final_map) - 2;
		gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
		final_map[index].virt = gd->arch.secure_ram & ~0x3;
		final_map[index].phys = final_map[index].virt;
		final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
		final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
		tlb_addr_save = gd->arch.tlb_addr;
	} else {
		/* Use allocated (board_f.c) memory for TLB */
		tlb_addr_save = gd->arch.tlb_allocated;
		gd->arch.tlb_addr = tlb_addr_save;
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* Point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
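
/*
 * Note the ordering above: the D-cache and MMU are turned off before
 * TTBR is repointed, so the switch from the early OCRAM tables to these
 * final DRAM tables happens with translation disabled, and CR_M only
 * re-enables the MMU once the new tables are live.
 */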
u64 get_page_table_size(void)
{
	return 0x5000;
}
int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);

	return 0;
}
/*
 * This function is called from common/board_r.c.
 * It recreates MMU table in main memory.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_SYS_DCACHE_OFF */
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type;

	type = gur_in32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}
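
/*
 * The cluster word packs one initiator index per byte, hence the
 * init_id * 8 shift above; TP_ITYP_AV marks whether the initiator at
 * that index is actually present, so absent slots read back as 0.
 */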
u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}
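
/*
 * cpu_mask() numbers ARM cores compactly in discovery order (which is
 * what hweight32() counts above), while cpu_pos_mask() keeps each core
 * at its physical i * TP_INIT_PER_CLUSTER + j position, so the two
 * masks only coincide when no initiator slots are skipped.
 */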
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}
u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}
#ifndef CONFIG_FSL_LSCH3
u32 get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif
#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}

	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif
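
/*
 * The RCW dump above comes out as rows of four 32-bit words, each row
 * prefixed with the byte offset (i * 4) of the first word in that row.
 */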
#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif
int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}
static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}
int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * Erratum A009635 is valid only for LS2080A SoC and
	 * its personalities.
	 */
	svr_dev_id = get_svr() >> 16;
	if (svr_dev_id == SVR_DEV_LS2080A)
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
	return 0;
}
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef CONFIG_ARCH_LS2080A
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#ifdef CONFIG_ARCH_LS2080A
	/*
	 * In certain Layerscape SoCs, the clock for each core
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr() >> 16;
	if (svr_dev_id == SVR_DEV_LS2080A)
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/*
	 * Enable clock for timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
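
/*
 * CNTFRQ_EL0, written above when COUNTER_FREQUENCY_REAL is defined, is
 * the frequency the generic timer reports to software, so it has to
 * match the clock actually feeding the system counter that CNTCR
 * enables here.
 */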
__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}
#ifdef CONFIG_EFI_LOADER

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

void efi_reset_system_init(void)
{
	efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#ifdef CONFIG_FSL_MC_ENET
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top -= mc_get_dram_block_size();
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}
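
/*
 * Example of the arithmetic above, with illustrative sizes: assuming a
 * 512 MB MC block and CONFIG_SYS_MC_RSV_MEM_ALIGN of 512 MB, a 3 GB
 * bank yields ram_top = 2.5 GB after the subtraction and alignment, so
 * the function reports a 512 MB reservation at the top of that bank.
 */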
phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB space at 0x8000_0000. If the memory extends to
	 * the second region (or the third region if applicable), the secure
	 * memory and Management Complex (MC) memory should be put into the
	 * highest region, i.e. the end of DDR memory. CONFIG_MAX_MEM_MAPPED
	 * is set to the size of the first region so U-Boot doesn't relocate
	 * itself into a higher address. Should DDR be configured to skip the
	 * first region, this function needs to be adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (rem > CONFIG_SYS_MEM_RESERVE_SECURE) {
		rem -= CONFIG_SYS_MEM_RESERVE_SECURE;
	} else {
		if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE) {
			ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
			rem = 0;	/* Presume MC requires more memory */
		} else {
			printf("Error: Not enough space for secure memory.\n");
		}
	}
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(rem))
			ea_size -= board_reserve_ram_top(rem);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}
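
/*
 * Worked example with illustrative sizes: 4 GB of DDR with a 2 GB
 * CONFIG_MAX_MEM_MAPPED and a 64 MB secure carve-out gives
 * ea_size = 2 GB and rem = 2 GB - 64 MB; as long as rem still covers
 * board_reserve_ram_top(), the visible 2 GB is returned unchanged and
 * the reservations all come out of the high region.
 */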
int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from low region to high region(s) presuming
	 * no hole is created with DDR configuration. gd->arch.secure_ram tracks
	 * the location of secure memory. gd->arch.resv_ram tracks the location
	 * of reserved memory for Management Complex (MC).
	 */
	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >= CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[2].size -= CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[2].start +
				      gd->bd->bi_dram[2].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >= CONFIG_SYS_MEM_RESERVE_SECURE) {
			gd->bd->bi_dram[1].size -=
					CONFIG_SYS_MEM_RESERVE_SECURE;
			gd->arch.secure_ram = gd->bd->bi_dram[1].start +
					      gd->bd->bi_dram[1].size;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
			gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
		} else if (gd->bd->bi_dram[0].size >
					CONFIG_SYS_MEM_RESERVE_SECURE) {
			gd->bd->bi_dram[0].size -=
					CONFIG_SYS_MEM_RESERVE_SECURE;
			gd->arch.secure_ram = gd->bd->bi_dram[0].start +
					      gd->bd->bi_dram[0].size;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
			gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
		}
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */
#ifdef CONFIG_FSL_MC_ENET
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			gd->bd->bi_dram[2].size -
			board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR:  ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
						  CONFIG_DP_DDR_CTRL,
						  CONFIG_DP_DDR_NUM_CTRLS,
						  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
						  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

	return 0;
}
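
/*
 * Resulting layout, with illustrative sizes: on an 8 GB system with a
 * 2 GB first block at 0x8000_0000, bank 0 is that 2 GB block and bank 1
 * carries the remaining 6 GB in the high region, with the secure-memory
 * and MC reservations carved off the top of the highest populated bank
 * as computed above.
 */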
#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_SPL_BUILD)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start, start;
	phys_size_t ram_size;
	u64 pages;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}
}
#endif
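
/*
 * EFI_PAGE_MASK/EFI_PAGE_SHIFT describe 4KB EFI pages: the start address
 * is rounded up to the next page boundary and the page count is rounded
 * up from the bank size before the range is added to the EFI memory map.
 */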
/*
 * Before DDR size is known, the early MMU table has DDR mapped as device
 * memory to avoid speculative access. To relocate U-Boot to DDR, "normal
 * memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes need
 * to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
				CONFIG_SYS_SDRAM_BASE,
				gd->ram_size,
				PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_OUTER_SHARE |
				PTE_BLOCK_NS |
				PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
				CONFIG_SYS_SDRAM_BASE,
				CONFIG_SYS_DDR_BLOCK1_SIZE,
				PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_OUTER_SHARE |
				PTE_BLOCK_NS |
				PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		}
	}
}
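
/*
 * For example, with a 2 GB block 1 and 6 GB of DDR, the remaining 4 GB
 * is remapped at CONFIG_SYS_DDR_BLOCK2_BASE as normal memory, spilling
 * into CONFIG_SYS_DDR_BLOCK3_BASE only once that remainder exceeds
 * CONFIG_SYS_DDR_BLOCK2_SIZE.
 */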
__weak int dram_init(void)
{
	fsl_initdram();
#if !defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}