1 // SPDX-License-Identifier: GPL-2.0+
4 * Copyright 2014-2015 Freescale Semiconductor, Inc.
8 #include <fsl_ddr_sdram.h>
10 #include <linux/errno.h>
11 #include <asm/system.h>
13 #include <asm/armv8/mmu.h>
15 #include <asm/arch/fsl_serdes.h>
16 #include <asm/arch/soc.h>
17 #include <asm/arch/cpu.h>
18 #include <asm/arch/speed.h>
19 #include <fsl_immap.h>
20 #include <asm/arch/mp.h>
21 #include <efi_loader.h>
22 #include <fsl-mc/fsl_mc.h>
23 #ifdef CONFIG_FSL_ESDHC
24 #include <fsl_esdhc.h>
26 #include <asm/armv8/sec_firmware.h>
27 #ifdef CONFIG_SYS_FSL_DDR
30 #include <asm/arch/clock.h>
32 #include <fsl_qbman.h>
34 DECLARE_GLOBAL_DATA_PTR;
36 static struct cpu_type cpu_type_list[] = {
37 CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
38 CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
39 CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
40 CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
41 CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
42 CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
43 CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
44 CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
45 CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
46 CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
47 CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
48 CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
49 CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
50 CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
51 CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
52 CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
53 CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
54 CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
55 CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
58 #define EARLY_PGTABLE_SIZE 0x5000
59 static struct mm_region early_map[] = {
60 #ifdef CONFIG_FSL_LSCH3
61 { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
62 CONFIG_SYS_FSL_CCSR_SIZE,
63 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
64 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
66 { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
67 SYS_FSL_OCRAM_SPACE_SIZE,
68 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
70 { CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
71 CONFIG_SYS_FSL_QSPI_SIZE1,
72 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
74 /* For IFC Region #1, only the first 4MB is cache-enabled */
75 { CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
76 CONFIG_SYS_FSL_IFC_SIZE1_1,
77 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
79 { CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
80 CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
81 CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
82 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
84 { CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
85 CONFIG_SYS_FSL_IFC_SIZE1,
86 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
89 { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
90 CONFIG_SYS_FSL_DRAM_SIZE1,
91 #if defined(CONFIG_TFABOOT) || \
92 (defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
93 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
94 #else /* Start with nGnRnE and PXN and UXN to prevent speculative access */
95 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
97 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
100 /* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
101 { CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
102 CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
103 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
106 { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
107 CONFIG_SYS_FSL_DCSR_SIZE,
108 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
109 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
111 { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
112 CONFIG_SYS_FSL_DRAM_SIZE2,
113 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
114 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
116 #elif defined(CONFIG_FSL_LSCH2)
117 { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
118 CONFIG_SYS_FSL_CCSR_SIZE,
119 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
120 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
122 { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
123 SYS_FSL_OCRAM_SPACE_SIZE,
124 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
126 { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
127 CONFIG_SYS_FSL_DCSR_SIZE,
128 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
129 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
131 { CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
132 CONFIG_SYS_FSL_QSPI_SIZE,
133 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
135 #ifdef CONFIG_FSL_IFC
136 { CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
137 CONFIG_SYS_FSL_IFC_SIZE,
138 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
141 { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
142 CONFIG_SYS_FSL_DRAM_SIZE1,
143 #if defined(CONFIG_TFABOOT) || \
144 (defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
145 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
146 #else /* Start with nGnRnE and PXN and UXN to prevent speculative access */
147 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
149 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
151 { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
152 CONFIG_SYS_FSL_DRAM_SIZE2,
153 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
154 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
157 {}, /* list terminator */
160 static struct mm_region final_map[] = {
161 #ifdef CONFIG_FSL_LSCH3
162 { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
163 CONFIG_SYS_FSL_CCSR_SIZE,
164 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
165 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
167 { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
168 SYS_FSL_OCRAM_SPACE_SIZE,
169 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
171 { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
172 CONFIG_SYS_FSL_DRAM_SIZE1,
173 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
174 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
176 { CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
177 CONFIG_SYS_FSL_QSPI_SIZE1,
178 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
179 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
181 { CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
182 CONFIG_SYS_FSL_QSPI_SIZE2,
183 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
184 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
186 #ifdef CONFIG_FSL_IFC
187 { CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
188 CONFIG_SYS_FSL_IFC_SIZE2,
189 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
190 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
193 { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
194 CONFIG_SYS_FSL_DCSR_SIZE,
195 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
196 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
198 { CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
199 CONFIG_SYS_FSL_MC_SIZE,
200 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
201 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
203 { CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
204 CONFIG_SYS_FSL_NI_SIZE,
205 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
206 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
208 /* For QBMAN portal, only the first 64MB is cache-enabled */
209 { CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
210 CONFIG_SYS_FSL_QBMAN_SIZE_1,
211 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
212 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
214 { CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
215 CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
216 CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
217 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
218 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
220 { CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
221 CONFIG_SYS_PCIE1_PHYS_SIZE,
222 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
223 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
225 { CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
226 CONFIG_SYS_PCIE2_PHYS_SIZE,
227 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
228 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
230 { CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
231 CONFIG_SYS_PCIE3_PHYS_SIZE,
232 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
233 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
235 #ifdef CONFIG_ARCH_LS2080A
236 { CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
237 CONFIG_SYS_PCIE4_PHYS_SIZE,
238 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
239 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
242 { CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
243 CONFIG_SYS_FSL_WRIOP1_SIZE,
244 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
245 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
247 { CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
248 CONFIG_SYS_FSL_AIOP1_SIZE,
249 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
250 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
252 { CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
253 CONFIG_SYS_FSL_PEBUF_SIZE,
254 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
255 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
257 { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
258 CONFIG_SYS_FSL_DRAM_SIZE2,
259 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
260 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
262 #elif defined(CONFIG_FSL_LSCH2)
263 { CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
264 CONFIG_SYS_FSL_BOOTROM_SIZE,
265 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
266 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
268 { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
269 CONFIG_SYS_FSL_CCSR_SIZE,
270 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
271 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
273 { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
274 SYS_FSL_OCRAM_SPACE_SIZE,
275 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
277 { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
278 CONFIG_SYS_FSL_DCSR_SIZE,
279 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
280 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
282 { CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
283 CONFIG_SYS_FSL_QSPI_SIZE,
284 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
285 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
287 #ifdef CONFIG_FSL_IFC
288 { CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
289 CONFIG_SYS_FSL_IFC_SIZE,
290 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
293 { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
294 CONFIG_SYS_FSL_DRAM_SIZE1,
295 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
296 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
298 { CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
299 CONFIG_SYS_FSL_QBMAN_SIZE,
300 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
301 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
303 { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
304 CONFIG_SYS_FSL_DRAM_SIZE2,
305 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
306 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
308 { CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
309 CONFIG_SYS_PCIE1_PHYS_SIZE,
310 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
311 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
313 { CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
314 CONFIG_SYS_PCIE2_PHYS_SIZE,
315 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
316 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
318 { CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
319 CONFIG_SYS_PCIE3_PHYS_SIZE,
320 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
321 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
323 { CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
324 CONFIG_SYS_FSL_DRAM_SIZE3,
325 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
326 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
329 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
330 {}, /* space holder for secure mem */
335 struct mm_region *mem_map = early_map;
337 void cpu_name(char *name)
339 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
340 unsigned int i, svr, ver;
342 svr = gur_in32(&gur->svr);
343 ver = SVR_SOC_VER(svr);
345 for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
346 if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
347 strcpy(name, cpu_type_list[i].name);
349 if (IS_E_PROCESSOR(svr))
352 sprintf(name + strlen(name), " Rev%d.%d",
353 SVR_MAJ(svr), SVR_MIN(svr));
357 if (i == ARRAY_SIZE(cpu_type_list))
358 strcpy(name, "unknown");
361 #ifndef CONFIG_SYS_DCACHE_OFF
363 * To start MMU before DDR is available, we create MMU table in SRAM.
364 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
365 * levels of translation tables here to cover 40-bit address space.
366 * We use a 4KB granule size with 40-bit physical addresses, so T0SZ=24.
367 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
368 * Note, the debug print in cache_v8.c is not usable for debugging
369 * these early MMU tables because UART is not yet available.
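 * In other words: with T0SZ=24 the translation regime covers 2^(64-24) =
 * 2^40 bytes (1TB). With a 4KB granule that walk uses a level 0, level 1
 * (1GB blocks) and level 2 (2MB blocks) table; no level-3 4KB pages are
 * needed for these block mappings, which helps keep the early tables within
 * EARLY_PGTABLE_SIZE.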
371 static inline void early_mmu_setup(void)
373 unsigned int el = current_el();
375 /* global data is already setup, no allocation yet */
377 gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
379 gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
380 gd->arch.tlb_fillptr = gd->arch.tlb_addr;
381 gd->arch.tlb_size = EARLY_PGTABLE_SIZE;
383 /* Create early page tables */
386 /* point TTBR to the new table */
387 set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
388 get_tcr(el, NULL, NULL) &
389 ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
392 set_sctlr(get_sctlr() | CR_M);
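/*
 * The LS2088A-family personalities place the PCIe physical windows at
 * different addresses than the values compiled into final_map, so the
 * affected entries are patched at run time based on the SVR read from the
 * GUTS block.
 */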
395 static void fix_pcie_mmu_map(void)
397 #ifdef CONFIG_ARCH_LS2080A
400 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
402 svr = gur_in32(&gur->svr);
403 ver = SVR_SOC_VER(svr);
405 /* Fix PCIE base and size for LS2088A */
406 if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
407 (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
408 (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
409 for (i = 0; i < ARRAY_SIZE(final_map); i++) {
410 switch (final_map[i].phys) {
411 case CONFIG_SYS_PCIE1_PHYS_ADDR:
412 final_map[i].phys = 0x2000000000ULL;
413 final_map[i].virt = 0x2000000000ULL;
414 final_map[i].size = 0x800000000ULL;
416 case CONFIG_SYS_PCIE2_PHYS_ADDR:
417 final_map[i].phys = 0x2800000000ULL;
418 final_map[i].virt = 0x2800000000ULL;
419 final_map[i].size = 0x800000000ULL;
421 case CONFIG_SYS_PCIE3_PHYS_ADDR:
422 final_map[i].phys = 0x3000000000ULL;
423 final_map[i].virt = 0x3000000000ULL;
424 final_map[i].size = 0x800000000ULL;
426 case CONFIG_SYS_PCIE4_PHYS_ADDR:
427 final_map[i].phys = 0x3800000000ULL;
428 final_map[i].virt = 0x3800000000ULL;
429 final_map[i].size = 0x800000000ULL;
440 * The final tables look similar to the early tables, but differ in detail.
441 * These tables are in DRAM. Sub tables are added to enable cache for
444 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
445 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
447 static inline void final_mmu_setup(void)
449 u64 tlb_addr_save = gd->arch.tlb_addr;
450 unsigned int el = current_el();
453 /* fix the final_map before filling in the block entries */
458 /* Update mapping for DDR to actual size */
459 for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
461 * Find the entry for DDR mapping and update the address and
462 * size. Zero-sized mapping will be skipped when creating MMU
465 switch (final_map[index].virt) {
466 case CONFIG_SYS_FSL_DRAM_BASE1:
467 final_map[index].virt = gd->bd->bi_dram[0].start;
468 final_map[index].phys = gd->bd->bi_dram[0].start;
469 final_map[index].size = gd->bd->bi_dram[0].size;
471 #ifdef CONFIG_SYS_FSL_DRAM_BASE2
472 case CONFIG_SYS_FSL_DRAM_BASE2:
473 #if (CONFIG_NR_DRAM_BANKS >= 2)
474 final_map[index].virt = gd->bd->bi_dram[1].start;
475 final_map[index].phys = gd->bd->bi_dram[1].start;
476 final_map[index].size = gd->bd->bi_dram[1].size;
478 final_map[index].size = 0;
482 #ifdef CONFIG_SYS_FSL_DRAM_BASE3
483 case CONFIG_SYS_FSL_DRAM_BASE3:
484 #if (CONFIG_NR_DRAM_BANKS >= 3)
485 final_map[index].virt = gd->bd->bi_dram[2].start;
486 final_map[index].phys = gd->bd->bi_dram[2].start;
487 final_map[index].size = gd->bd->bi_dram[2].size;
489 final_map[index].size = 0;
498 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
499 if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
502 * Only use gd->arch.secure_ram if the address is
503 * recalculated. Align to 4KB for MMU table.
505 /* put page tables in secure ram */
506 index = ARRAY_SIZE(final_map) - 2;
507 gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
508 final_map[index].virt = gd->arch.secure_ram & ~0x3;
509 final_map[index].phys = final_map[index].virt;
510 final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
511 final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
512 gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
513 tlb_addr_save = gd->arch.tlb_addr;
515 /* Use allocated (board_f.c) memory for TLB */
516 tlb_addr_save = gd->arch.tlb_allocated;
517 gd->arch.tlb_addr = tlb_addr_save;
522 /* Reset the fill ptr */
523 gd->arch.tlb_fillptr = tlb_addr_save;
525 /* Create normal system page tables */
528 /* Create emergency page tables */
529 gd->arch.tlb_addr = gd->arch.tlb_fillptr;
530 gd->arch.tlb_emerg = gd->arch.tlb_addr;
532 gd->arch.tlb_addr = tlb_addr_save;
534 /* Disable cache and MMU */
535 dcache_disable(); /* TLBs are invalidated */
536 invalidate_icache_all();
538 /* point TTBR to the new table */
539 set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
542 set_sctlr(get_sctlr() | CR_M);
545 u64 get_page_table_size(void)
550 int arch_cpu_init(void)
553 * This function is called before U-Boot relocates itself, to speed up
554 * the rest of the boot. It is not necessary to run if performance is not
555 * critical. Skip if the MMU is already enabled by SPL or other means.
557 if (get_sctlr() & CR_M)
561 __asm_invalidate_dcache_all();
562 __asm_invalidate_tlb_all();
564 set_sctlr(get_sctlr() | CR_C);
574 * This function is called from common/board_r.c.
575 * It recreates the MMU table in main memory.
577 void enable_caches(void)
580 __asm_invalidate_tlb_all();
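/*
 * Decode topology initiator "init_id" from a cluster descriptor word read
 * from the GUTS TP cluster registers: the per-initiator index selects an
 * entry in tp_ityp[], which is returned when the initiator is marked
 * available (TP_ITYP_AV).
 */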
586 u32 initiator_type(u32 cluster, int init_id)
588 struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
589 u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
592 type = gur_in32(&gur->tp_ityp[idx]);
593 if (type & TP_ITYP_AV)
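/*
 * Build a bitmap of physical core positions: bit
 * (cluster * TP_INIT_PER_CLUSTER + initiator) is set for every available
 * initiator whose type is an ARM core.
 */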
599 u32 cpu_pos_mask(void)
601 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
603 u32 cluster, type, mask = 0;
608 cluster = gur_in32(&gur->tp_cluster[i].lower);
609 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
610 type = initiator_type(cluster, j);
611 if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
612 mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
615 } while ((cluster & TP_CLUSTER_EOC) == 0x0);
622 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
623 int i = 0, count = 0;
624 u32 cluster, type, mask = 0;
629 cluster = gur_in32(&gur->tp_cluster[i].lower);
630 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
631 type = initiator_type(cluster, j);
633 if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
639 } while ((cluster & TP_CLUSTER_EOC) == 0x0);
645 * Return the number of cores on this SOC.
647 int cpu_numcores(void)
649 return hweight32(cpu_mask());
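/*
 * Walk the topology clusters, counting available initiators, until the
 * count reaches "core"; the cluster reached at that point is the one the
 * core belongs to. Returns -1 if the core cannot be identified.
 */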
652 int fsl_qoriq_core_to_cluster(unsigned int core)
654 struct ccsr_gur __iomem *gur =
655 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
656 int i = 0, count = 0;
662 cluster = gur_in32(&gur->tp_cluster[i].lower);
663 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
664 if (initiator_type(cluster, j)) {
671 } while ((cluster & TP_CLUSTER_EOC) == 0x0);
673 return -1; /* cannot identify the cluster */
676 u32 fsl_qoriq_core_to_type(unsigned int core)
678 struct ccsr_gur __iomem *gur =
679 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
680 int i = 0, count = 0;
686 cluster = gur_in32(&gur->tp_cluster[i].lower);
687 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
688 type = initiator_type(cluster, j);
696 } while ((cluster & TP_CLUSTER_EOC) == 0x0);
698 return -1; /* cannot identify the cluster */
701 #ifndef CONFIG_FSL_LSCH3
704 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
706 return gur_in32(&gur->svr);
710 #ifdef CONFIG_DISPLAY_CPUINFO
711 int print_cpuinfo(void)
713 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
714 struct sys_info sysinfo;
716 unsigned int i, core;
717 u32 type, rcw, svr = gur_in32(&gur->svr);
722 printf(" %s (0x%x)\n", buf, svr);
723 memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
724 get_sys_info(&sysinfo);
725 puts("Clock Configuration:");
726 for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
729 type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
730 printf("CPU%d(%s):%-4s MHz ", core,
731 type == TY_ITYP_VER_A7 ? "A7 " :
732 (type == TY_ITYP_VER_A53 ? "A53" :
733 (type == TY_ITYP_VER_A57 ? "A57" :
734 (type == TY_ITYP_VER_A72 ? "A72" : " "))),
735 strmhz(buf, sysinfo.freq_processor[core]));
737 /* Display platform clock as Bus frequency. */
738 printf("\n Bus: %-4s MHz ",
739 strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
740 printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
741 #ifdef CONFIG_SYS_DPAA_FMAN
742 printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
744 #ifdef CONFIG_SYS_FSL_HAS_DP_DDR
745 if (soc_has_dp_ddr()) {
746 printf(" DP-DDR: %-4s MT/s",
747 strmhz(buf, sysinfo.freq_ddrbus2));
753 * Display the RCW, so that no one gets confused as to what RCW
754 * we're actually using for this boot.
756 puts("Reset Configuration Word (RCW):");
757 for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
758 rcw = gur_in32(&gur->rcwsr[i]);
760 printf("\n %08x:", i * 4);
761 printf(" %08x", rcw);
769 #ifdef CONFIG_FSL_ESDHC
770 int cpu_mmc_init(bd_t *bis)
772 return fsl_esdhc_mmc_init(bis);
776 int cpu_eth_init(bd_t *bis)
780 #if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
781 error = fsl_mc_ldpaa_init(bis);
783 #ifdef CONFIG_FMAN_ENET
784 fm_standard_init(bis);
789 static inline int check_psci(void)
791 unsigned int psci_ver;
793 psci_ver = sec_firmware_support_psci_version();
794 if (psci_ver == PSCI_INVALID_VER)
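/*
 * Parse the "core_prefetch" option from the hwconfig environment variable
 * (e.g. hwconfig=core_prefetch:disable=0x0e - the value is only an
 * illustration, not taken from this file) and request, via the
 * SIP_PREFETCH_DISABLE_64 SMC ID, that the secure firmware disable hardware
 * prefetch on the cores set in the mask. Core 0 must stay enabled, so a
 * mask with bit 0 set is rejected.
 */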
800 static void config_core_prefetch(void)
803 char buffer[HWCONFIG_BUFFER_SIZE];
804 const char *prefetch_arg = NULL;
809 if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
812 prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
816 mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
818 printf("Core0 prefetch can't be disabled\n");
822 #define SIP_PREFETCH_DISABLE_64 0xC200FF13
823 regs.regs[0] = SIP_PREFETCH_DISABLE_64;
828 printf("Prefetch disable config failed for mask ");
830 printf("Prefetch disable config passed for mask ");
831 printf("0x%x\n", mask);
835 int arch_early_init_r(void)
837 #ifdef CONFIG_SYS_FSL_ERRATUM_A009635
840 * erratum A009635 is valid only for LS2080A SoC and
843 svr_dev_id = get_svr();
844 if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
847 #if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
848 erratum_a009942_check_cpo();
851 debug("PSCI: PSCI does not exist.\n");
853 /* if PSCI does not exist, boot secondary cores here */
854 if (fsl_layerscape_wake_seconday_cores())
855 printf("Did not wake secondary cores\n");
858 #ifdef CONFIG_SYS_FSL_HAS_RGMII
862 config_core_prefetch();
864 #ifdef CONFIG_SYS_HAS_SERDES
867 #ifdef CONFIG_FMAN_ENET
870 #ifdef CONFIG_SYS_DPAA_QBMAN
871 setup_qbman_portals();
878 u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
879 #ifdef CONFIG_FSL_LSCH3
880 u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
882 #if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
883 u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
886 #ifdef COUNTER_FREQUENCY_REAL
887 unsigned long cntfrq = COUNTER_FREQUENCY_REAL;
889 /* Update with accurate clock frequency */
890 if (current_el() == 3)
891 asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
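/*
 * CNTFRQ_EL0 is only writable from the highest implemented exception level,
 * hence the EL3 check above; at lower ELs the value programmed by earlier
 * firmware is kept.
 */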
894 #ifdef CONFIG_FSL_LSCH3
895 /* Enable timebase for all clusters.
896 * It is safe to do so even if some clusters are not enabled.
898 out_le32(cltbenr, 0xf);
901 #if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
903 * In certain Layerscape SoCs, the clock for each core's time base
904 * has an enable bit in the PMU Physical Core Time Base Enable
905 * Register (PCTBENR), which allows the watchdog to operate.
907 setbits_le32(pctbenr, 0xff);
909 * For the LS2080A SoC and its personalities, the timer controller
910 * offset is different.
912 svr_dev_id = get_svr();
913 if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
914 cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
918 /* Enable the clock for the timer.
919 * This is a global setting.
921 out_le32(cntcr, 0x1);
926 __efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
928 void __efi_runtime reset_cpu(ulong addr)
932 /* Raise RESET_REQ_B */
933 val = scfg_in32(rstcr);
935 scfg_out32(rstcr, val);
938 #ifdef CONFIG_EFI_LOADER
940 void __efi_runtime EFIAPI efi_reset_system(
941 enum efi_reset_type reset_type,
942 efi_status_t reset_status,
943 unsigned long data_size, void *reset_data)
945 switch (reset_type) {
948 case EFI_RESET_PLATFORM_SPECIFIC:
951 case EFI_RESET_SHUTDOWN:
952 /* Nothing we can do */
959 efi_status_t efi_reset_system_init(void)
961 return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
967 * Calculate the reserved memory size for the given memory bank.
968 * Return the aligned memory size on success.
969 * Return (ram_size + needed size) on failure.
971 phys_size_t board_reserve_ram_top(phys_size_t ram_size)
973 phys_size_t ram_top = ram_size;
975 #if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
976 ram_top = mc_get_dram_block_size();
977 if (ram_top > ram_size)
978 return ram_size + ram_top;
980 ram_top = ram_size - ram_top;
981 /* The start address of MC reserved memory needs to be aligned. */
982 ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
985 return ram_size - ram_top;
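/*
 * Illustrative example for board_reserve_ram_top() above (values are made
 * up, not taken from this file): with ram_size = 0x80000000 (2GB), an MC
 * block size of 0x20000000 (512MB) and CONFIG_SYS_MC_RSV_MEM_ALIGN =
 * 0x20000000, ram_top becomes 0x60000000 after alignment and the function
 * returns 0x20000000, i.e. the MC block is carved out of the aligned top of
 * the bank.
 */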
988 phys_size_t get_effective_memsize(void)
990 phys_size_t ea_size, rem = 0;
993 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
994 * first region is the 2GB space at 0x8000_0000. Secure memory needs to be
995 * allocated from the first region. If the memory extends into the second
996 * region (or the third region if applicable), Management Complex (MC)
997 * memory should be put into the highest region, i.e. at the end of DDR
998 * memory. CONFIG_MAX_MEM_MAPPED is set to the size of the first region so
999 * U-Boot doesn't relocate itself into a higher address. Should DDR be
1000 * configured to skip the first region, this function needs to be
1003 if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
1004 ea_size = CONFIG_MAX_MEM_MAPPED;
1005 rem = gd->ram_size - ea_size;
1007 ea_size = gd->ram_size;
1010 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
1011 /* Check if we have enough space for secure memory */
1012 if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
1013 ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
1015 printf("Error: Not enough space for secure memory.\n");
1017 /* Check if we have enough memory for MC */
1018 if (rem < board_reserve_ram_top(rem)) {
1019 /* Not enough memory in high region to reserve */
1020 if (ea_size > board_reserve_ram_top(ea_size))
1021 ea_size -= board_reserve_ram_top(ea_size);
1023 printf("Error: Not enough space for reserved memory.\n");
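/*
 * Example for get_effective_memsize() above (assumed values, not from this
 * file): with CONFIG_MAX_MEM_MAPPED = 2GB, gd->ram_size = 4GB and a 64MB
 * secure reserve, ea_size starts at 2GB and rem at 2GB; the secure reserve
 * is subtracted from ea_size, and the MC block is carved from ea_size only
 * when rem is too small to hold it.
 */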
1029 int dram_init_banksize(void)
1031 #ifdef CONFIG_SYS_DP_DDR_BASE_PHY
1032 phys_size_t dp_ddr_size;
1036 * gd->ram_size has the total size of DDR memory, less reserved secure
1037 * memory. The DDR extends from the low region to the high region(s),
1038 * presuming the DDR configuration creates no holes. gd->arch.secure_ram tracks
1039 * the location of secure memory. gd->arch.resv_ram tracks the location
1040 * of reserved memory for Management Complex (MC). Because gd->ram_size
1041 * is reduced by this function if secure memory is reserved, checking
1042 * gd->arch.secure_ram should be done to avoid running it repeatedly.
1045 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
1046 if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
1047 debug("No need to run again, skip %s\n", __func__);
1053 gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
1054 if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
1055 gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
1056 gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
1057 gd->bd->bi_dram[1].size = gd->ram_size -
1058 CONFIG_SYS_DDR_BLOCK1_SIZE;
1059 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1060 if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
1061 gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
1062 gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
1063 CONFIG_SYS_DDR_BLOCK2_SIZE;
1064 gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
1068 gd->bd->bi_dram[0].size = gd->ram_size;
1070 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
1071 if (gd->bd->bi_dram[0].size >
1072 CONFIG_SYS_MEM_RESERVE_SECURE) {
1073 gd->bd->bi_dram[0].size -=
1074 CONFIG_SYS_MEM_RESERVE_SECURE;
1075 gd->arch.secure_ram = gd->bd->bi_dram[0].start +
1076 gd->bd->bi_dram[0].size;
1077 gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
1078 gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
1080 #endif /* CONFIG_SYS_MEM_RESERVE_SECURE */
1082 #if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
1083 /* Assign memory for MC */
1084 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1085 if (gd->bd->bi_dram[2].size >=
1086 board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
1087 gd->arch.resv_ram = gd->bd->bi_dram[2].start +
1088 gd->bd->bi_dram[2].size -
1089 board_reserve_ram_top(gd->bd->bi_dram[2].size);
1093 if (gd->bd->bi_dram[1].size >=
1094 board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
1095 gd->arch.resv_ram = gd->bd->bi_dram[1].start +
1096 gd->bd->bi_dram[1].size -
1097 board_reserve_ram_top(gd->bd->bi_dram[1].size);
1098 } else if (gd->bd->bi_dram[0].size >
1099 board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
1100 gd->arch.resv_ram = gd->bd->bi_dram[0].start +
1101 gd->bd->bi_dram[0].size -
1102 board_reserve_ram_top(gd->bd->bi_dram[0].size);
1105 #endif /* CONFIG_FSL_MC_ENET */
1107 #ifdef CONFIG_SYS_DP_DDR_BASE_PHY
1108 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1109 #error "This SoC shouldn't have DP DDR"
1111 if (soc_has_dp_ddr()) {
1112 /* initialize DP-DDR here */
1115 * The DDR controller uses 0 as the base address for binding.
1116 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to access.
1118 dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
1120 CONFIG_DP_DDR_NUM_CTRLS,
1121 CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
1124 gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
1125 gd->bd->bi_dram[2].size = dp_ddr_size;
1127 puts("Not detected");
1132 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
1133 debug("%s is called. gd->ram_size is reduced to %lu\n",
1134 __func__, (ulong)gd->ram_size);
1140 #if CONFIG_IS_ENABLED(EFI_LOADER)
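/*
 * Report each DRAM bank to the EFI memory map as conventional memory,
 * trimming the tail reserved for the Management Complex (gd->arch.resv_ram)
 * and skipping the DP-DDR bank.
 */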
1141 void efi_add_known_memory(void)
1144 phys_addr_t ram_start, start;
1145 phys_size_t ram_size;
1149 for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
1150 #ifdef CONFIG_SYS_DP_DDR_BASE_PHY
1151 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1152 #error "This SoC shouldn't have DP DDR"
1155 continue; /* skip DP-DDR */
1157 ram_start = gd->bd->bi_dram[i].start;
1158 ram_size = gd->bd->bi_dram[i].size;
1159 #ifdef CONFIG_RESV_RAM
1160 if (gd->arch.resv_ram >= ram_start &&
1161 gd->arch.resv_ram < ram_start + ram_size)
1162 ram_size = gd->arch.resv_ram - ram_start;
1164 start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
1165 pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;
1167 efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
1174 * Before the DDR size is known, the early MMU table maps DDR as device memory
1175 * to avoid speculative access. To relocate U-Boot to DDR, "normal memory"
1176 * needs to be set for these mappings.
1177 * If a special case configures DDR with holes in the mapping, the holes need
1178 * to be marked as invalid. This is not implemented in this function.
1180 void update_early_mmu_table(void)
1182 if (!gd->arch.tlb_addr)
1185 if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
1186 mmu_change_region_attr(
1187 CONFIG_SYS_SDRAM_BASE,
1189 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1190 PTE_BLOCK_OUTER_SHARE |
1194 mmu_change_region_attr(
1195 CONFIG_SYS_SDRAM_BASE,
1196 CONFIG_SYS_DDR_BLOCK1_SIZE,
1197 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1198 PTE_BLOCK_OUTER_SHARE |
1201 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1202 #ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
1203 #error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
1205 if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
1206 CONFIG_SYS_DDR_BLOCK2_SIZE) {
1207 mmu_change_region_attr(
1208 CONFIG_SYS_DDR_BLOCK2_BASE,
1209 CONFIG_SYS_DDR_BLOCK2_SIZE,
1210 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1211 PTE_BLOCK_OUTER_SHARE |
1214 mmu_change_region_attr(
1215 CONFIG_SYS_DDR_BLOCK3_BASE,
1217 CONFIG_SYS_DDR_BLOCK1_SIZE -
1218 CONFIG_SYS_DDR_BLOCK2_SIZE,
1219 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1220 PTE_BLOCK_OUTER_SHARE |
1226 mmu_change_region_attr(
1227 CONFIG_SYS_DDR_BLOCK2_BASE,
1229 CONFIG_SYS_DDR_BLOCK1_SIZE,
1230 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1231 PTE_BLOCK_OUTER_SHARE |
1238 __weak int dram_init(void)
1241 #if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
1242 defined(CONFIG_SPL_BUILD)
1243 /* This will break-before-make MMU for DDR */
1244 update_early_mmu_table();