// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <fsl_ddr_sdram.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>
#include <env_internal.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif

DECLARE_GLOBAL_DATA_PTR;
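
/*
 * Table of known SoC personalities: human-readable name, SVR SoC version
 * and core count. cpu_name() walks this table to identify the running part.
 */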
static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
};

#define EARLY_PGTABLE_SIZE 0x5000
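
/*
 * MMU regions used before DDR is initialized: CCSR and DCSR are mapped as
 * strongly-ordered device memory, OCRAM and the cache-enabled parts of
 * QSPI/IFC as normal memory. Unless TFABOOT is used or SPL has already
 * initialized DDR, DDR starts out as device memory with PXN/UXN set to
 * prevent speculative access before it is configured.
 */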
static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};
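
/*
 * MMU regions installed by final_mmu_setup() once the DDR size is known:
 * DDR banks are normal memory here, while peripheral windows (QSPI, IFC,
 * DCSR, MC, QBMan, PCIe, WRIOP, AIOP, PEBUF) remain device memory.
 */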
static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE5_PHYS_ADDR
	{ SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
	  SYS_PCIE5_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE6_PHYS_ADDR
	{ SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
	  SYS_PCIE6_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},	/* list terminator */
};

struct mm_region *mem_map = early_map;
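
/*
 * Look up the SoC name for the running SVR and append the C/E variant
 * suffix and the silicon revision, e.g. "LS2088AE Rev1.1".
 */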
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#ifdef CONFIG_ARCH_LX2160A
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif
			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * To start the MMU before DDR is available, we create MMU tables in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40 bits physical address, T0SZ=24.
 * Address space above EARLY_PGTABLE_SIZE (0x5000) is free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
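
/*
 * LS2088A-family personalities place the PCIe windows at different
 * addresses than LS2080A; patch the PCIe entries in final_map before
 * the final MMU tables are built.
 */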
static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
			default:
				break;
			}
		}
	}
#endif
}

/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mappings will be skipped when creating the
		 * MMU table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		/*
		 * Only use gd->arch.secure_ram if the address is
		 * recalculated. Align to 4KB for MMU table.
		 */
		/* put page tables in secure ram */
		index = ARRAY_SIZE(final_map) - 2;
		gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
		final_map[index].virt = gd->arch.secure_ram & ~0x3;
		final_map[index].phys = final_map[index].virt;
		final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
		final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
		tlb_addr_save = gd->arch.tlb_addr;
	} else {
		/* Use allocated (board_f.c) memory for TLB */
		tlb_addr_save = gd->arch.tlb_allocated;
		gd->arch.tlb_addr = tlb_addr_save;
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself to speed up
	 * the rest of the boot. It is not necessary to run if performance is
	 * not critical. Skip if the MMU is already enabled by SPL or other
	 * means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */
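
/*
 * With TFABOOT, the boot source is derived from the Reset Configuration
 * Word: PORSR1 is obtained either through the SIP_SVC_RCW SMC to TF-A or
 * by reading the register directly, and __get_boot_src() decodes its
 * RCW_SRC field into a boot_src value.
 */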
#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif

	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC EMMC */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;
	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);

	return src;
}

enum boot_src get_boot_src(void)
{
	struct pt_regs regs;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		regs.regs[0] = SIP_SVC_RCW;

		smc_call(&regs);
		if (!regs.regs[0])
			porsr1 = regs.regs[1];
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}

	debug("%s: porsr1 0x%x\n", __func__, porsr1);

	return __get_boot_src(porsr1);
}
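
/*
 * The detected boot source also selects which MMC device holds the
 * environment and where the environment itself lives for this boot.
 */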
#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif

enum env_location env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_ENV_IS_NOWHERE
	return env_loc;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
	case BOOT_SOURCE_QSPI_NAND:
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
	default:
		break;
	}

	return env_loc;
}
#endif	/* CONFIG_TFABOOT */
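
/*
 * Core topology is described by the Topology Initiator (TP) registers in
 * GUR: initiator_type() returns the type word for one initiator of a
 * cluster (or 0 if it is not available), and the helpers below walk the
 * cluster list to build core masks and per-core cluster/type lookups.
 */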
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}
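
/* On non-LSCH3 SoCs, read the SVR directly from the GUR block. */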
#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}
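
/*
 * Optionally disable hardware prefetch on selected cores, based on the
 * "core_prefetch:disable=<mask>" hwconfig option; the request is passed
 * to secure firmware through the SIP_PREFETCH_DISABLE_64 SMC. Core 0
 * cannot be disabled.
 */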
static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}

int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * erratum A009635 is valid only for LS2080A SoC and
	 * its personalities
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci())
		debug("PSCI: PSCI does not exist.\n");

	/* if PSCI does not exist, boot secondary cores here */
	if (fsl_layerscape_wake_seconday_cores())
		printf("Did not wake secondary cores\n");

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
	/* Some dpmacs in ARMv8-based Freescale Layerscape SoCs can be
	 * configured via both serdes (SGMII, XFI, XLAUI etc.) bits and via
	 * EC*_PMUX (RGMII) bits in the RCW.
	 * For example, dpmac 17 and 18 in LX2160A can be configured as SGMII
	 * from the serdes bits and as RGMII via the EC1_PMUX/EC2_PMUX bits.
	 * A dpmac enabled by the serdes bits takes precedence over the
	 * EC*_PMUX bits, i.e. if the selected serdes protocol configures
	 * dpmac17 as SGMII and EC1_PMUX is set to RGMII, the dpmac is SGMII
	 * and not RGMII.
	 *
	 * Therefore fsl_rgmii_init() is called after fsl_serdes_init(); the
	 * SoC's fsl_rgmii_init() checks whether a dpmac has already been
	 * enabled by fsl_serdes_init() and, if so, does not enable it again.
	 */
	fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
	return 0;
}
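
/*
 * Set up the ARM generic timer: program CNTFRQ at EL3, enable the cluster
 * and per-core timebases where the SoC requires it, and finally enable
 * the global timer clock.
 */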
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	/*
	 * In certain Layerscape SoCs, the clock for each core's timebase
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/* Enable clock for timer
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
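
/*
 * Reset is requested through the RSTCR register. The pointer and the
 * reset handler are marked __efi_runtime so EFI ResetSystem can still use
 * them after ExitBootServices.
 */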
__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

#ifdef CONFIG_ARCH_LX2160A
	val = in_le32(rstcr);
	val |= 0x01;	/* assumed: software reset request bit in RSTCR */
	out_le32(rstcr, val);
#else
	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}

#ifdef CONFIG_EFI_LOADER

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate reserved memory for the given memory bank.
 * Return the aligned memory size on success, or
 * (ram_size + needed size) on failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB space at 0x8000_0000. Secure memory needs to be
	 * allocated from the first region. If the memory extends to the second
	 * region (or the third region if applicable), Management Complex (MC)
	 * memory should be put into the highest region, i.e. the end of DDR
	 * memory. CONFIG_MAX_MEM_MAPPED is set to the size of the first region
	 * so U-Boot doesn't relocate itself into higher addresses. Should DDR
	 * be configured to skip the first region, this function needs to be
	 * adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}
#ifdef CONFIG_TFABOOT
phys_size_t tfa_get_dram_size(void)
{
	struct pt_regs regs;
	phys_size_t dram_size = 0;

	regs.regs[0] = SMC_DRAM_BANK_INFO;
	regs.regs[1] = -1;

	smc_call(&regs);
	if (regs.regs[0])
		return 0;

	dram_size = regs.regs[1];
	return dram_size;
}

static int tfa_dram_init_banksize(void)
{
	int i = 0, ret = 0;
	struct pt_regs regs;
	phys_size_t dram_size = tfa_get_dram_size();

	debug("dram_size %llx\n", dram_size);

	if (!dram_size)
		return -EINVAL;

	do {
		regs.regs[0] = SMC_DRAM_BANK_INFO;
		regs.regs[1] = i;

		smc_call(&regs);
		if (regs.regs[0]) {
			ret = -EINVAL;
			break;
		}

		debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
		      regs.regs[2]);
		gd->bd->bi_dram[i].start = regs.regs[1];
		gd->bd->bi_dram[i].size = regs.regs[2];

		dram_size -= gd->bd->bi_dram[i].size;

		i++;
	} while (dram_size);

	if (i > 0)
		ret = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			    gd->bd->bi_dram[2].size -
			    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

	return ret;
}
#endif

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
	if (!tfa_dram_init_banksize())
		return 0;
#endif
	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from the low region to the high region(s)
	 * presuming no hole is created by the DDR configuration.
	 * gd->arch.secure_ram tracks the location of secure memory.
	 * gd->arch.resv_ram tracks the location of reserved memory for the
	 * Management Complex (MC). Because gd->ram_size is reduced by this
	 * function if secure memory is reserved, gd->arch.secure_ram should be
	 * checked to avoid running it repeatedly.
	 */
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);

		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
				CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			    gd->bd->bi_dram[2].size -
			    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR:  ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
						  CONFIG_DP_DDR_CTRL_NUM,
						  CONFIG_DP_DDR_NUM_CTRLS,
						  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
						  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}
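
/*
 * Report the DRAM banks to the EFI subsystem as conventional memory,
 * skipping DP-DDR and trimming the region reserved for the MC firmware.
 */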
#if CONFIG_IS_ENABLED(EFI_LOADER)
void efi_add_known_memory(void)
{
	int i = 0;
	phys_addr_t ram_start, start;
	phys_size_t ram_size;
	u64 pages;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}
}
#endif

/*
 * Before the DDR size is known, the early MMU table has DDR mapped as device
 * memory to avoid speculative access. To relocate U-Boot to DDR, "normal
 * memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes need
 * to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
	defined(CONFIG_SPL_BUILD)
	/* This will break-before-make the MMU mappings for DDR */
	update_early_mmu_table();
#endif

	return 0;
}