// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <fsl_ddr_sdram.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <fm_eth.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>

#ifdef CONFIG_TFABOOT
#include <environment.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#endif

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
};

#define EARLY_PGTABLE_SIZE 0x5000
static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};

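/*
 * Illustrative note (not part of the original table): each mm_region
 * entry above is { virt, phys, size, attrs }. A hypothetical board
 * needing one more device window would add an entry before the empty
 * terminator, e.g.:
 *
 *	{ 0x3000000UL, 0x3000000UL, 0x1000000UL,
 *	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE },
 *
 * The empty {} entry must stay last; table creation stops there.
 */
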
static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},	/* list terminator */
};

struct mm_region *mem_map = early_map;

void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#ifdef CONFIG_ARCH_LX2160A
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * To start MMU before DDR is available, we create MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover 40-bit address space.
 * We use 4KB granule size, with 40 bits physical address, T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

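/*
 * Sanity check of the geometry above (illustrative): with T0SZ=24 the
 * covered virtual space is 2^(64 - 24) = 2^40 bytes (1TB), and a 4KB
 * granule resolves that in three lookup levels, matching the
 * three-level tables early_mmu_setup() builds in the 0x5000-byte
 * OCRAM window.
 */
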
static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
			default:
				break;
			}
		}
	}
#endif
}

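/*
 * Resulting layout (illustrative): after the fixup above, LS2088A-class
 * parts see contiguous 32GB (0x8_0000_0000) PCIe windows at
 * 0x20_0000_0000, 0x28_0000_0000, 0x30_0000_0000 and 0x38_0000_0000
 * instead of the LS2080A addresses baked into final_map at build time.
 */
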
/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself to speed up
	 * system running. It is not necessary to run if performance is not
	 * critical. Skip if MMU is already enabled by SPL or other means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

/*
 * This function is called from common/board_r.c.
 * It recreates MMU table in main memory.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif
	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC EMMC */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;
	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);
	return src;
}

enum boot_src get_boot_src(void)
{
	struct pt_regs regs;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		regs.regs[0] = SIP_SVC_RCW;

		smc_call(&regs);
		if (!regs.regs[0])
			porsr1 = regs.regs[1];
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}

	debug("%s: porsr1 0x%x\n", __func__, porsr1);

	return __get_boot_src(porsr1);
}

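/*
 * Usage sketch (illustrative): callers normally branch on the decoded
 * source rather than on raw PORSR1 bits, e.g.
 *
 *	if (get_boot_src() == BOOT_SOURCE_QSPI_NOR)
 *		... pick the SPI-flash environment ...
 *
 * as env_get_location() does below.
 */
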
#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif

enum env_location env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_CHAIN_OF_TRUST
	/* Check Boot Mode.
	 * If Boot Mode is Secure, return ENVL_NOWHERE
	 */
	if (fsl_check_boot_mode_secure() == 1)
		goto done;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_QSPI_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
		/* FALLTHROUGH */
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
		/* FALLTHROUGH */
	default:
		break;
	}

#ifdef CONFIG_CHAIN_OF_TRUST
done:
#endif
	return env_loc;
}
#endif	/* CONFIG_TFABOOT */

u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

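/*
 * Example (illustrative): on a fully fused quad-core personality
 * cpu_mask() returns 0xf, so cpu_numcores() = hweight32(0xf) = 4.
 * Partially fused derivatives simply report fewer set bits.
 */
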
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}

static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}

int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * erratum A009635 is valid only for LS2080A SoC and
	 * its personalities
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
	/* Some dpmacs in armv8a based Freescale Layerscape SOCs can be
	 * configured both via serdes (sgmii, xfi, xlaui etc) bits and via
	 * EC*_PMUX (rgmii) bits in the RCW.
	 * e.g. dpmac 17 and 18 in LX2160A can be configured as SGMII from
	 * serdes bits and as RGMII via EC1_PMUX/EC2_PMUX bits.
	 * If a dpmac is enabled by serdes bits it takes precedence over the
	 * EC*_PMUX bits, i.e. in LX2160A if we select a serdes protocol
	 * that configures dpmac17 as SGMII and set EC1_PMUX to RGMII,
	 * the dpmac is SGMII and not RGMII.
	 *
	 * Therefore, fsl_rgmii_init runs after fsl_serdes_init. The SOC's
	 * fsl_rgmii_init function checks whether the dpmac is already
	 * enabled (by fsl_serdes_init); if it is, it is not enabled again.
	 */
	fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif

	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	/*
	 * In certain Layerscape SoCs, the clock for each core has an
	 * enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For LS2080A SoC and its personalities, timer controller
	 * offset is different
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/* Enable clock for timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

#ifdef CONFIG_ARCH_LX2160A
	val = in_le32(rstcr);
	val |= 0x01;
	out_le32(rstcr, val);
#else
	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}

#ifdef CONFIG_EFI_LOADER

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate reserved memory with the given memory bank.
 * Return aligned memory size on success.
 * Return (ram_size + needed size) on failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}

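/*
 * Worked example (illustrative numbers): for ram_size = 0x80000000 and
 * mc_get_dram_block_size() = 0x20000000, ram_top becomes 0x60000000
 * (aligned down to CONFIG_SYS_MC_RSV_MEM_ALIGN), and the function
 * returns ram_size - ram_top, i.e. roughly the 0x20000000 reserved at
 * the top of the bank. A result larger than ram_size signals failure.
 */
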
phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB space at 0x8000_0000. Secure memory needs to be
	 * allocated from the first region. If the memory extends to the second
	 * region (or the third region if applicable), Management Complex (MC)
	 * memory should be put into the highest region, i.e. the end of DDR
	 * memory. CONFIG_MAX_MEM_MAPPED is set to the size of first region so
	 * U-Boot doesn't relocate itself into a higher address. Should DDR be
	 * configured to skip the first region, this function needs to be
	 * adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}

#ifdef CONFIG_TFABOOT
phys_size_t tfa_get_dram_size(void)
{
	struct pt_regs regs;
	phys_size_t dram_size = 0;

	regs.regs[0] = SMC_DRAM_BANK_INFO;
	regs.regs[1] = -1;

	smc_call(&regs);
	if (regs.regs[0])
		return 0;

	dram_size = regs.regs[1];
	return dram_size;
}

static int tfa_dram_init_banksize(void)
{
	int i = 0, ret = 0;
	struct pt_regs regs;
	phys_size_t dram_size = tfa_get_dram_size();

	debug("dram_size %llx\n", dram_size);

	if (!dram_size)
		return -EINVAL;

	do {
		regs.regs[0] = SMC_DRAM_BANK_INFO;
		regs.regs[1] = i;

		smc_call(&regs);
		if (regs.regs[0]) {
			ret = -EINVAL;
			break;
		}

		debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
		      regs.regs[2]);
		gd->bd->bi_dram[i].start = regs.regs[1];
		gd->bd->bi_dram[i].size = regs.regs[2];

		dram_size -= gd->bd->bi_dram[i].size;

		i++;
	} while (dram_size);

	if (i > 0)
		ret = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			    gd->bd->bi_dram[2].size -
			    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

	return ret;
}
#endif

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
	if (!tfa_dram_init_banksize())
		return 0;
#endif
	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from low region to high region(s) presuming
	 * no hole is created with DDR configuration. gd->arch.secure_ram tracks
	 * the location of secure memory. gd->arch.resv_ram tracks the location
	 * of reserved memory for Management Complex (MC). Because gd->ram_size
	 * is reduced by this function if secure memory is reserved, checking
	 * gd->arch.secure_ram should be done to avoid running it repeatedly.
	 */

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);
		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
				CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			    gd->bd->bi_dram[2].size -
			    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR:  ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for core to access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
					  CONFIG_DP_DDR_CTRL,
					  CONFIG_DP_DDR_NUM_CTRLS,
					  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
					  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}

#if CONFIG_IS_ENABLED(EFI_LOADER)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start, start;
	phys_size_t ram_size;
	u64 pages;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}
}
#endif

/*
 * Before the DDR size is known, the early MMU table has DDR mapped as
 * device memory to avoid speculative access. To relocate U-Boot to DDR,
 * "normal memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes need
 * to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		}
	}
}

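/*
 * Example (illustrative numbers): with gd->ram_size = 4GB,
 * CONFIG_SYS_FSL_DRAM_SIZE1 = 2GB and CONFIG_SYS_DDR_BLOCK1_SIZE = 2GB,
 * the else branch remarks the low 2GB block as normal memory and the
 * remaining 2GB at CONFIG_SYS_DDR_BLOCK2_BASE in a second call.
 */
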
__weak int dram_init(void)
{
	fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
	defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}