/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * Extracted from armv8/start.S
 */
9 #include <linux/linkage.h>
11 #include <asm/macro.h>
12 #include <asm/arch-fsl-layerscape/soc.h>
14 #include <asm/arch/mp.h>
16 #ifdef CONFIG_FSL_LSCH3
17 #include <asm/arch-fsl-layerscape/immap_lsch3.h>
19 #include <asm/u-boot.h>
/*
 * For LS1043a rev1.0, GIC base address align with 4k.
 * For LS1043a rev1.1, if DCFG_GIC400_ALIGN[GIC_ADDR_BIT]
 * is set, GIC base address align with 4K, or else align
 * with 64K.
 * output:
 *	x0: the base address of GICD
 *	x1: the base address of GICC
 */
#ifdef CONFIG_HAS_FEATURE_GIC64K_ALIGN
	/* Read the SoC Version Register to identify the part/revision. */
	ldr	x2, =DCFG_CCSR_SVR
	/* NOTE(review): the load from [x2] and the SVR compare against w4
	 * are in lines omitted from this extract; w4 is loaded below but
	 * no cmp/branch is visible here -- confirm against full source.
	 */
	ldr	w4, =SVR_DEV(SVR_LS1043A)
	/* Read the GIC alignment control word from SCFG. */
	ldr	x2, =SCFG_GIC400_ALIGN
	/* If GIC_ADDR_BIT is set the GIC is 4K-aligned: skip the 64K bases
	 * (the 4K-base path is presumably at label 1:, omitted here).
	 */
	tbnz	w2, #GIC_ADDR_BIT, 1f
	ldr	x0, =GICD_BASE_64K	/* x0 = GIC distributor base, 64K aligned */
	ldr	x1, =GICC_BASE_64K	/* x1 = GIC CPU interface base, 64K aligned */
ENDPROC(get_gic_offset)
/*
 * smp_kick_all_cpus - wake the secondary cores.
 * Clobbers x29 (used as LR scratch) plus whatever
 * gic_kick_secondary_cpus clobbers.
 * NOTE(review): the trailing ret/#endif are in lines omitted from
 * this extract.
 */
ENTRY(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	mov	x29, lr			/* Save LR */
	bl	gic_kick_secondary_cpus
	mov	lr, x29			/* Restore LR */
ENDPROC(smp_kick_all_cpus)
	/*
	 * Body of lowlevel_init: SoC-specific early init (CCN-504 ring
	 * setup, SMMU page size, GIC secure banks, TZPC/TZASC, LS1046A L2
	 * latency).  The ENTRY(lowlevel_init) line and a number of
	 * instructions are in lines omitted from this extract; the
	 * NOTE(review) comments below flag the visible gaps.
	 */
	mov	x29, lr			/* Save LR */

	/* unmask SError and abort */
	/* NOTE(review): the DAIF-unmask and the mrs of HCR_EL2 feeding x0
	 * are omitted here -- the orr below modifies x0 set by them.
	 */
	/* Set HCR_EL2[AMO] so SError @EL2 is taken */
	orr	x0, x0, #0x20		/* AMO */

	/* The following platform setup is EL3-only. */
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */

#if defined (CONFIG_SYS_FSL_HAS_CCN504)
	/* Set Wuo bit for RN-I 20 */
#ifdef CONFIG_ARCH_LS2080A
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	/*
	 * Set forced-order mode in RNI-6, RNI-20
	 * This is required for performance optimization on LS2088A
	 * LS2080A family does not support setting forced-order mode,
	 * so skip this operation for LS2080A family
	 */
	ldr	w1, =SVR_DEV(SVR_LS2080A)
	/* NOTE(review): the SVR compare/branch implementing the skip, and
	 * the helper calls that actually write each register, are omitted
	 * from this extract throughout this #if block.
	 */
	ldr	x0, =CCI_AUX_CONTROL_BASE(6)
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)

	/* Add fully-coherent masters to DVM domain */
	ldr	x1, =CCI_MN_RNF_NODEID_LIST
	ldr	x2, =CCI_MN_DVM_DOMAIN_CTL_SET
	bl	ccn504_add_masters_to_dvm

	/* Set all RN-I ports to QoS of 15 */
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(0)
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(0)
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(0)
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(2)
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(2)
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(2)
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(6)
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(6)
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(6)
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(12)
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(12)
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(12)
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(16)
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(16)
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(16)
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(20)
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(20)
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(20)
#endif /* CONFIG_SYS_FSL_HAS_CCN504 */

	/* Set the SMMU page size in the sACR register */
	/* NOTE(review): the mrs/ldr of sACR into w0 and the write-back are
	 * omitted from this extract.
	 */
	orr	w0, w0, #1 << 16 /* set sACR.pagesize to indicate 64K page */

	/* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	/* NOTE(review): a nested '#ifdef CONFIG_GICV3' (not visible in this
	 * extract) presumably pairs with the '#elif' below -- verify.
	 */
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	bl	gic_init_secure_percpu

	branch_if_master x0, x1, 2f
#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
	/* Park slave cores on the secondary boot path. */
	ldr	x0, =secondary_boot_func

	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
	orr	w0, w0, #1 << 3 /* DCFG_RESET is accessible from NS world */

#ifdef CONFIG_FSL_TZASC_400
	/*
	 * LS2080 and its personalities does not support TZASC
	 * So skip TZASC related operations
	 */
	ldr	w1, =SVR_DEV(SVR_LS2080A)

	/* Set TZASC so that:
	 * a. We use only Region0 whose global secure write/read is EN
	 * b. We use only Region0 whose NSAID write/read is EN
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 * mirrors (per upstream; the rest of this comment is omitted here).
	 */
	/* Program one TZASC instance; \xreg selects the instance base.
	 * NOTE(review): the str write-backs and the .endm are omitted
	 * from this extract.
	 */
	.macro tzasc_prog, xreg
	ldr	w0, [x1]	/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0	/* Set open_request for Filter 0 */
	ldr	w0, [x1]	/* Region-0 Attributes Register */
	orr	w0, w0, #1 << 31 /* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30 /* Set Sec global read en, Bit[30] */
	ldr	w0, [x1]	/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF	/* Set nsaid_wr_en and nsaid_rd_en */
#ifdef CONFIG_FSL_TZASC_1
#ifdef CONFIG_FSL_TZASC_2
#ifdef CONFIG_ARCH_LS1046A
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
	/* Initialize the L2 RAM latency */
	mrs	x1, S3_1_c11_c0_2	/* L2CTLR_EL1 (implementation defined) */
	/* Clear L2 Tag RAM latency and L2 Data RAM latency */
	/* Set L2 data ram latency bits [2:0] */
	/* set L2 tag ram latency bits [8:6] */
	/* NOTE(review): the and/orr instructions the three comments above
	 * describe are omitted from this extract.
	 */
	msr	S3_1_c11_c0_2, x1

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
	mov	lr, x29			/* Restore LR */
ENDPROC(lowlevel_init)
#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
/*
 * fsl_ocram_init - clear any latched OCRAM ECC error status at boot.
 * Clobbers x28 (LR scratch).
 * NOTE(review): the trailing ret is in a line omitted from this extract.
 */
ENTRY(fsl_ocram_init)
	mov	x28, lr			/* Save LR */
	bl	fsl_ocram_clear_ecc_err
	mov	lr, x28			/* Restore LR */
ENDPROC(fsl_ocram_init)
/*
 * fsl_clear_ocram - zero the whole on-chip RAM region.
 * x0 = OCRAM start, x1 = OCRAM end (exclusive).
 * NOTE(review): the fill value setup and the branch to the actual
 * memset helper are in lines omitted from this extract.
 */
ENTRY(fsl_clear_ocram)
	ldr	x0, =CONFIG_SYS_FSL_OCRAM_BASE
	ldr	x1, =(CONFIG_SYS_FSL_OCRAM_BASE + CONFIG_SYS_FSL_OCRAM_SIZE)
ENDPROC(fsl_clear_ocram)
/*
 * fsl_ocram_clear_ecc_err - clear OCRAM1/2 single- and multi-bit ECC
 * error status via the DCSR DCFG SBEESR2/MBEESR2 registers.
 * Clobbers x0 (and presumably a w-register holding the status mask;
 * the str write-backs are in lines omitted from this extract).
 */
ENTRY(fsl_ocram_clear_ecc_err)
	/* OCRAM1/2 ECC status bit */
	ldr	x0, =DCSR_DCFG_SBEESR2
	ldr	x0, =DCSR_DCFG_MBEESR2
	/*
	 * Fix: ENDPROC must name the symbol opened by ENTRY above.  The
	 * original said ENDPROC(fsl_ocram_init), which attaches the
	 * .size/.type metadata to the wrong symbol (fsl_ocram_init is
	 * already closed by its own ENDPROC earlier in this file).
	 */
ENDPROC(fsl_ocram_clear_ecc_err)
#ifdef CONFIG_FSL_LSCH3
	/* NOTE(review): this appears to be the body of get_svr (return the
	 * SoC Version Register); its label, the load from [x1] into w0,
	 * and the ret are all in lines omitted from this extract.
	 */
	ldr	x1, =FSL_LSCH3_SVR
#ifdef CONFIG_SYS_FSL_HAS_CCN504
/* x0 has the desired status, return 0 for success, 1 for timeout
 * clobber x1, x2, x3, x4, x6, x7
 */
	/* NOTE(review): the 'hnf_pstate_poll:' label and the loop branches
	 * (load of status into x2, cbz/b.ne, timer re-read, ret) are in
	 * lines omitted from this extract.
	 */
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	add	x3, x3, #1200		/* timeout after 100 microseconds */
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS */
	mov	w6, #8			/* HN-F node count */
	cmp	x2, x1			/* check status */
	mov	x7, #1			/* timeout */
	add	x0, x0, #0x10000	/* move to next node */
/* x0 has the desired state, clobber x1, x2, x6 */
	/* NOTE(review): the 'hnf_set_pstate:' label, the loads/stores on
	 * the request register, the loop branch and ret are in lines
	 * omitted from this extract.
	 */
	/* power state to SFONLY */
	mov	w6, #8			/* HN-F node count */
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ */
1:	/* set pstate to sfonly */
	and	x2, x2, #0xfffffffffffffffc	/* & HNFPSTAT_MASK */
	add	x0, x0, #0x10000	/* move to next node */
/*
 * __asm_flush_l3_dcache - flush the CCN-504 L3 by driving the HN-F
 * P-state to SFONLY and then FAM, polling for each transition.
 */
ENTRY(__asm_flush_l3_dcache)
	/*
	 * Return status in x0
	 *	timeout 1 for setting SFONLY, 2 for FAM, 3 for both
	 */
	/* NOTE(review): the register saves, the bl calls to
	 * hnf_set_pstate/hnf_pstate_poll, the timeout accounting on x8/x9
	 * and the ret are in lines omitted from this extract.
	 */
	mov	x0, #0x1		/* HNFPSTAT_SFONLY */
	mov	x0, #0x4		/* SFONLY status */
	mov	x8, #1			/* timeout */
	mov	x0, #0x3		/* HNFPSTAT_FAM */
	mov	x0, #0xc		/* FAM status */
ENDPROC(__asm_flush_l3_dcache)
#endif /* CONFIG_SYS_FSL_HAS_CCN504 */

	/* Keep literals not used by the secondary boot code outside it */
	/* NOTE(review): the .ltorg directive this comment refers to is in
	 * a line omitted from this extract.
	 */

	/* Using 64 bit alignment since the spin table is accessed as data */
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
	/* NOTE(review): the 'secondary_boot_code:' and '__spin_table:'
	 * labels and the .align directives preceding this reservation are
	 * in lines omitted from this extract.
	 */
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
ENTRY(secondary_boot_func)
	/*
	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
	 * MPIDR[7:2] = AFF0_RES
	 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
	 * MPIDR[23:16] = AFF2_CLUSTERID
	 * MPIDR[29:25] = RES0
	 * MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = MPIDR[15:8] | MPIDR[1:0]
	 */
	/* NOTE(review): the mrs of MPIDR_EL1 into x0 and the field
	 * extracts into x1/x2 are in lines omitted from this extract.
	 */
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
	/*
	 * offset of the spin table element for this core from start of spin
	 * table (each elem is padded to 64 bytes)
	 */
	ldr	x0, =__spin_table
	/* physical address of this cpus spin table element */
	/* NOTE(review): the add computing x11 from x0 + offset is omitted
	 * here; x11 is used as the element base below.
	 */
	ldr	x0, =__real_cntfrq
	msr	cntfrq_el0, x0		/* set with real frequency */
	str	x9, [x11, #16]		/* LPID */
	str	x4, [x11, #8]		/* STATUS */
	/* Sleep until woken by the boot core's SGI. */
#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
	gic_wait_for_interrupt_m x0, w1
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	/* NOTE(review): bit 25 of x1 presumably selects the big-endian
	 * path, per the rev below -- the loads feeding x0/x1 and the
	 * cpu_is_le label are in omitted lines.
	 */
	tbz	x1, #25, cpu_is_le
	rev	x0, x0			/* BE to LE conversion */
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
	ldr	x5, =ES_TO_AARCH64
	ldr	x5, =ES_TO_AARCH32
	bl	secondary_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
	ldr	x5, =ES_TO_AARCH64
	bl	secondary_switch_to_el2
ENDPROC(secondary_boot_func)
/*
 * secondary_switch_to_el2 - drop from EL3 to EL2 via the
 * armv8_switch_to_el2_m macro; x4 = entry point, x5 = execution state
 * (ES_TO_AARCH32/64, per the callers above), x6 = scratch for switch_el.
 */
ENTRY(secondary_switch_to_el2)
	switch_el x6, 1f, 0f, 0f
	/* NOTE(review): the '0:' not-in-EL3 path is in a line omitted
	 * from this extract.
	 */
1:	armv8_switch_to_el2_m x4, x5, x6
ENDPROC(secondary_switch_to_el2)
/*
 * secondary_switch_to_el1 - re-read this core's spin-table entry and
 * drop from EL2 to EL1 via armv8_switch_to_el1_m.
 */
ENTRY(secondary_switch_to_el1)
	/* NOTE(review): the mrs/extract instructions feeding x1/x2, the
	 * spin-table element address computation, and the branches that
	 * choose between the AArch32/AArch64 loads of x5 are in lines
	 * omitted from this extract.
	 */
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ldr	x0, =__spin_table
	/* physical address of this cpus spin table element */
	ldr	x5, =ES_TO_AARCH32
2:	ldr	x5, =ES_TO_AARCH64
	switch_el x6, 0f, 1f, 0f
1:	armv8_switch_to_el1_m x4, x5, x6
ENDPROC(secondary_switch_to_el1)
/* Ensure that the literals used by the secondary boot code are
 * assembled within it (this is required so that we can protect
 * this area with a single memreserve region
 */
	/* NOTE(review): the .ltorg that realizes the comment above is in
	 * a line omitted from this extract.
	 */

	/* 64 bit alignment for elements accessed as data */
	.global __real_cntfrq
	/* NOTE(review): the '__real_cntfrq:' label line is omitted here;
	 * the quad below is its default counter-frequency value.
	 */
	.quad COUNTER_FREQUENCY

	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	.quad .-secondary_boot_code