/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 * Extracted from armv8/start.S
 */
10 #include <linux/linkage.h>
12 #include <asm/macro.h>
14 #include <asm/arch/mp.h>
18 mov x29, lr /* Save LR */
20 #ifdef CONFIG_FSL_LSCH3
22 /* Set Wuo bit for RN-I 20 */
24 ldr x0, =CCI_AUX_CONTROL_BASE(20)
29 /* Add fully-coherent masters to DVM domain */
31 ldr x1, =CCI_MN_RNF_NODEID_LIST
32 ldr x2, =CCI_MN_DVM_DOMAIN_CTL_SET
33 bl ccn504_add_masters_to_dvm
35 /* Set all RN-I ports to QoS of 15 */
36 ldr x0, =CCI_S0_QOS_CONTROL_BASE(0)
39 ldr x0, =CCI_S1_QOS_CONTROL_BASE(0)
42 ldr x0, =CCI_S2_QOS_CONTROL_BASE(0)
46 ldr x0, =CCI_S0_QOS_CONTROL_BASE(2)
49 ldr x0, =CCI_S1_QOS_CONTROL_BASE(2)
52 ldr x0, =CCI_S2_QOS_CONTROL_BASE(2)
56 ldr x0, =CCI_S0_QOS_CONTROL_BASE(6)
59 ldr x0, =CCI_S1_QOS_CONTROL_BASE(6)
62 ldr x0, =CCI_S2_QOS_CONTROL_BASE(6)
66 ldr x0, =CCI_S0_QOS_CONTROL_BASE(12)
69 ldr x0, =CCI_S1_QOS_CONTROL_BASE(12)
72 ldr x0, =CCI_S2_QOS_CONTROL_BASE(12)
76 ldr x0, =CCI_S0_QOS_CONTROL_BASE(16)
79 ldr x0, =CCI_S1_QOS_CONTROL_BASE(16)
82 ldr x0, =CCI_S2_QOS_CONTROL_BASE(16)
86 ldr x0, =CCI_S0_QOS_CONTROL_BASE(20)
89 ldr x0, =CCI_S1_QOS_CONTROL_BASE(20)
92 ldr x0, =CCI_S2_QOS_CONTROL_BASE(20)
98 /* Set the SMMU page size in the sACR register */
101 orr w0, w0, #1 << 16 /* set sACR.pagesize to indicate 64K page */
105 /* Initialize GIC Secure Bank Status */
106 #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
107 branch_if_slave x0, 1f
113 bl gic_init_secure_percpu
114 #elif defined(CONFIG_GICV2)
117 bl gic_init_secure_percpu
121 branch_if_master x0, x1, 2f
123 #if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
124 ldr x0, =secondary_boot_func
129 #ifdef CONFIG_FSL_TZPC_BP147
130 /* Set Non Secure access for all devices protected via TZPC */
131 ldr x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
132 orr w0, w0, #1 << 3 /* DCFG_RESET is accessible from NS world */
139 #ifdef CONFIG_FSL_TZASC_400
140 /* Set TZASC so that:
141 * a. We use only Region0 whose global secure write/read is EN
142 * b. We use only Region0 whose NSAID write/read is EN
144 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
147 ldr x1, =TZASC_GATE_KEEPER(0)
148 ldr x0, [x1] /* Filter 0 Gate Keeper Register */
149 orr x0, x0, #1 << 0 /* Set open_request for Filter 0 */
152 ldr x1, =TZASC_GATE_KEEPER(1)
153 ldr x0, [x1] /* Filter 0 Gate Keeper Register */
154 orr x0, x0, #1 << 0 /* Set open_request for Filter 0 */
157 ldr x1, =TZASC_REGION_ATTRIBUTES_0(0)
158 ldr x0, [x1] /* Region-0 Attributes Register */
159 orr x0, x0, #1 << 31 /* Set Sec global write en, Bit[31] */
160 orr x0, x0, #1 << 30 /* Set Sec global read en, Bit[30] */
163 ldr x1, =TZASC_REGION_ATTRIBUTES_0(1)
164 ldr x0, [x1] /* Region-1 Attributes Register */
165 orr x0, x0, #1 << 31 /* Set Sec global write en, Bit[31] */
166 orr x0, x0, #1 << 30 /* Set Sec global read en, Bit[30] */
169 ldr x1, =TZASC_REGION_ID_ACCESS_0(0)
170 ldr w0, [x1] /* Region-0 Access Register */
171 mov w0, #0xFFFFFFFF /* Set nsaid_wr_en and nsaid_rd_en */
174 ldr x1, =TZASC_REGION_ID_ACCESS_0(1)
175 ldr w0, [x1] /* Region-1 Attributes Register */
176 mov w0, #0xFFFFFFFF /* Set nsaid_wr_en and nsaid_rd_en */
182 mov lr, x29 /* Restore LR */
184 ENDPROC(lowlevel_init)
187 /* x0 has the desired status, return 0 for success, 1 for timeout
188 * clobber x1, x2, x3, x4, x6, x7
191 mov x7, #0 /* flag for timeout */
192 mrs x3, cntpct_el0 /* read timer */
193 add x3, x3, #1200 /* timeout after 100 microseconds */
195 movk x0, #0x420, lsl #16 /* HNF0_PSTATE_STATUS */
196 mov w6, #8 /* HN-F node count */
199 cmp x2, x1 /* check status */
204 mov x7, #1 /* timeout */
207 add x0, x0, #0x10000 /* move to next node */
215 /* x0 has the desired state, clobber x1, x2, x6 */
217 /* power state to SFONLY */
218 mov w6, #8 /* HN-F node count */
220 movk x0, #0x420, lsl #16 /* HNF0_PSTATE_REQ */
221 1: /* set pstate to sfonly */
223 and x2, x2, #0xfffffffffffffffc /* & HNFPSTAT_MASK */
226 add x0, x0, #0x10000 /* move to next node */
232 ENTRY(__asm_flush_l3_cache)
234 * Return status in x0
236 * tmeout 1 for setting SFONLY, 2 for FAM, 3 for both
242 mov x0, #0x1 /* HNFPSTAT_SFONLY */
245 mov x0, #0x4 /* SFONLY status */
248 mov x8, #1 /* timeout */
251 mov x0, #0x3 /* HNFPSTAT_FAM */
254 mov x0, #0xc /* FAM status */
262 ENDPROC(__asm_flush_l3_cache)
265 /* Keep literals not used by the secondary boot code outside it */
268 /* Using 64 bit alignment since the spin table is accessed as data */
270 .global secondary_boot_code
271 /* Secondary Boot Code starts here */
275 .space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
278 ENTRY(secondary_boot_func)
281 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
282 * MPIDR[7:2] = AFF0_RES
283 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
284 * MPIDR[23:16] = AFF2_CLUSTERID
286 * MPIDR[29:25] = RES0
289 * MPIDR[39:32] = AFF3
291 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
292 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
293 * until AFF2_CLUSTERID and AFF3 have non-zero values)
295 * LPID = MPIDR[15:8] | MPIDR[1:0]
300 orr x10, x2, x1, lsl #2 /* x10 has LPID */
301 ubfm x9, x0, #0, #15 /* x9 contains MPIDR[15:0] */
303 * offset of the spin table element for this core from start of spin
304 * table (each elem is padded to 64 bytes)
307 ldr x0, =__spin_table
308 /* physical address of this cpus spin table element */
311 ldr x0, =__real_cntfrq
313 msr cntfrq_el0, x0 /* set with real frequency */
314 str x9, [x11, #16] /* LPID */
316 str x4, [x11, #8] /* STATUS */
318 #if defined(CONFIG_GICV3)
319 gic_wait_for_interrupt_m x0
320 #elif defined(CONFIG_GICV2)
322 gic_wait_for_interrupt_m x0, w1
325 bl secondary_switch_to_el2
326 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
327 bl secondary_switch_to_el1
334 #ifndef CONFIG_ARMV8_SWITCH_TO_EL1
339 tbz x1, #25, cpu_is_le
340 rev x0, x0 /* BE to LE conversion */
342 br x0 /* branch to the given address */
343 ENDPROC(secondary_boot_func)
345 ENTRY(secondary_switch_to_el2)
346 switch_el x0, 1f, 0f, 0f
348 1: armv8_switch_to_el2_m x0
349 ENDPROC(secondary_switch_to_el2)
351 ENTRY(secondary_switch_to_el1)
352 switch_el x0, 0f, 1f, 0f
354 1: armv8_switch_to_el1_m x0, x1
355 ENDPROC(secondary_switch_to_el1)
357 /* Ensure that the literals used by the secondary boot code are
358 * assembled within it (this is required so that we can protect
359 * this area with a single memreserve region
363 /* 64 bit alignment for elements accessed as data */
365 .global __real_cntfrq
367 .quad COUNTER_FREQUENCY
368 .globl __secondary_boot_code_size
369 .type __secondary_boot_code_size, %object
370 /* Secondary Boot Code ends here */
371 __secondary_boot_code_size:
372 .quad .-secondary_boot_code