/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 * Extracted from armv8/start.S
 */
10 #include <linux/linkage.h>
12 #include <asm/macro.h>
14 #include <asm/arch/mp.h>
/*
 * lowlevel_init: early secure-world SoC init for Freescale Layerscape.
 * NOTE(review): the ENTRY(lowlevel_init) directive and many interleaved
 * instructions/#endif lines are absent from this extraction -- the
 * embedded original line numbers skip values throughout.  Recover the
 * complete source before changing any code in this routine.
 */
18 mov x29, lr /* Save LR */
20 #ifdef CONFIG_FSL_LSCH3
21 /* Add fully-coherent masters to DVM domain */
23 ldr x1, =CCI_MN_RNF_NODEID_LIST
24 ldr x2, =CCI_MN_DVM_DOMAIN_CTL_SET
25 bl ccn504_add_masters_to_dvm
/*
 * Program the S0/S1/S2 QoS control registers of interconnect nodes
 * 0, 2, 6, 12, 16 and 20.  NOTE(review): only the address loads are
 * visible; the value setup and stores between them were lost in the
 * extraction (embedded numbering gaps, e.g. 28->31->34).
 */
27 /* Set all RN-I ports to QoS of 15 */
28 ldr x0, =CCI_S0_QOS_CONTROL_BASE(0)
31 ldr x0, =CCI_S1_QOS_CONTROL_BASE(0)
34 ldr x0, =CCI_S2_QOS_CONTROL_BASE(0)
38 ldr x0, =CCI_S0_QOS_CONTROL_BASE(2)
41 ldr x0, =CCI_S1_QOS_CONTROL_BASE(2)
44 ldr x0, =CCI_S2_QOS_CONTROL_BASE(2)
48 ldr x0, =CCI_S0_QOS_CONTROL_BASE(6)
51 ldr x0, =CCI_S1_QOS_CONTROL_BASE(6)
54 ldr x0, =CCI_S2_QOS_CONTROL_BASE(6)
58 ldr x0, =CCI_S0_QOS_CONTROL_BASE(12)
61 ldr x0, =CCI_S1_QOS_CONTROL_BASE(12)
64 ldr x0, =CCI_S2_QOS_CONTROL_BASE(12)
68 ldr x0, =CCI_S0_QOS_CONTROL_BASE(16)
71 ldr x0, =CCI_S1_QOS_CONTROL_BASE(16)
74 ldr x0, =CCI_S2_QOS_CONTROL_BASE(16)
78 ldr x0, =CCI_S0_QOS_CONTROL_BASE(20)
81 ldr x0, =CCI_S1_QOS_CONTROL_BASE(20)
84 ldr x0, =CCI_S2_QOS_CONTROL_BASE(20)
/* NOTE(review): the load of w0 before this orr and the store writing
 * it back to the sACR register are not visible in this extraction. */
89 /* Set the SMMU page size in the sACR register */
92 orr w0, w0, #1 << 16 /* set sACR.pagesize to indicate 64K page */
95 /* Initialize GIC Secure Bank Status */
96 #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
/* NOTE(review): the "1:" target of this branch is not visible here. */
97 branch_if_slave x0, 1f
103 bl gic_init_secure_percpu
104 #elif defined(CONFIG_GICV2)
107 bl gic_init_secure_percpu
/* NOTE(review): the "2:" target of this branch is not visible here. */
111 branch_if_master x0, x1, 2f
113 #if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
114 ldr x0, =secondary_boot_func
119 #ifdef CONFIG_FSL_TZPC_BP147
120 /* Set Non Secure access for all devices protected via TZPC */
121 ldr x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
122 orr w0, w0, #1 << 3 /* DCFG_RESET is accessible from NS world */
129 #ifdef CONFIG_FSL_TZASC_400
130 /* Set TZASC so that:
131 * a. We use only Region0 whose global secure write/read is EN
132 * b. We use only Region0 whose NSAID write/read is EN
134 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
 */
/* Open the gate keepers of TZASC filters 0 and 1.  NOTE(review): the
 * stores writing the prepared values back are missing in this
 * extraction (embedded numbering gaps, e.g. 139->142). */
137 ldr x1, =TZASC_GATE_KEEPER(0)
138 ldr x0, [x1] /* Filter 0 Gate Keeper Register */
139 orr x0, x0, #1 << 0 /* Set open_request for Filter 0 */
142 ldr x1, =TZASC_GATE_KEEPER(1)
143 ldr x0, [x1] /* Filter 0 Gate Keeper Register */
144 orr x0, x0, #1 << 0 /* Set open_request for Filter 0 */
/* Enable secure global read/write for Region-0 of both TZASC units. */
147 ldr x1, =TZASC_REGION_ATTRIBUTES_0(0)
148 ldr x0, [x1] /* Region-0 Attributes Register */
149 orr x0, x0, #1 << 31 /* Set Sec global write en, Bit[31] */
150 orr x0, x0, #1 << 30 /* Set Sec global read en, Bit[30] */
153 ldr x1, =TZASC_REGION_ATTRIBUTES_0(1)
154 ldr x0, [x1] /* Region-1 Attributes Register */
155 orr x0, x0, #1 << 31 /* Set Sec global write en, Bit[31] */
156 orr x0, x0, #1 << 30 /* Set Sec global read en, Bit[30] */
/* Grant all NSAIDs read/write access for Region-0 of both units.
 * NOTE(review): the ldr result is immediately overwritten by the mov
 * below -- presumably intentional (write-all), but confirm against the
 * full source. */
159 ldr x1, =TZASC_REGION_ID_ACCESS_0(0)
160 ldr w0, [x1] /* Region-0 Access Register */
161 mov w0, #0xFFFFFFFF /* Set nsaid_wr_en and nsaid_rd_en */
164 ldr x1, =TZASC_REGION_ID_ACCESS_0(1)
165 ldr w0, [x1] /* Region-1 Attributes Register */
166 mov w0, #0xFFFFFFFF /* Set nsaid_wr_en and nsaid_rd_en */
/* NOTE(review): the final stores, barriers and #endif lines closing
 * the conditional blocks above are missing from this extraction. */
172 mov lr, x29 /* Restore LR */
174 ENDPROC(lowlevel_init)
/*
 * hnf_pstate_poll (presumed name -- its ENTRY line is not visible in
 * this extraction): poll the P-state status register of each of the
 * 8 HN-F nodes until the status in x0 is reached or a timeout expires.
 * NOTE(review): loop labels, loads and branches between the surviving
 * lines are missing (embedded numbering gaps 183->185, 186->189 etc.).
 * The header comment below lost its closing marker; re-closed here.
 */
177 /* x0 has the desired status, return 0 for success, 1 for timeout
178 * clobber x1, x2, x3, x4, x6, x7
 */
181 mov x7, #0 /* flag for timeout */
182 mrs x3, cntpct_el0 /* read timer */
/* 1200 counter ticks == 100 us implies a 12 MHz system counter --
 * TODO(review): confirm against the platform's COUNTER_FREQUENCY. */
183 add x3, x3, #1200 /* timeout after 100 microseconds */
185 movk x0, #0x420, lsl #16 /* HNF0_PSTATE_STATUS */
186 mov w6, #8 /* HN-F node count */
189 cmp x2, x1 /* check status */
194 mov x7, #1 /* timeout */
/* Each HN-F node's register block is 0x10000 bytes apart. */
197 add x0, x0, #0x10000 /* move to next node */
/*
 * hnf_set_pstate (presumed name -- its ENTRY line is not visible in
 * this extraction): write the requested P-state into the
 * HNF_PSTATE_REQ register of each of the 8 HN-F nodes.
 * NOTE(review): the loads/stores inside the loop and the loop branch
 * are missing (embedded numbering gaps 211->213, 213->216).
 */
205 /* x0 has the desired state, clobber x1, x2, x6 */
207 /* power state to SFONLY */
208 mov w6, #8 /* HN-F node count */
210 movk x0, #0x420, lsl #16 /* HNF0_PSTATE_REQ */
211 1: /* set pstate to sfonly */
213 and x2, x2, #0xfffffffffffffffc /* & HNFPSTAT_MASK */
/* Each HN-F node's register block is 0x10000 bytes apart. */
216 add x0, x0, #0x10000 /* move to next node */
222 ENTRY(__asm_flush_l3_cache)
/*
 * Flush the L3 cache by requesting the HN-F SFONLY then FAM P-states
 * and polling each for completion (see the set/poll fragments above).
 * NOTE(review): the bl calls to those helpers, the branches that set
 * x8 on failure, and the return sequence are missing from this
 * extraction.  The two lines below are comment continuations whose
 * opening marker was lost; re-wrapped here.
 */
/*
224 * Return status in x0
226 * timeout 1 for setting SFONLY, 2 for FAM, 3 for both
 */
232 mov x0, #0x1 /* HNFPSTAT_SFONLY */
235 mov x0, #0x4 /* SFONLY status */
238 mov x8, #1 /* timeout */
241 mov x0, #0x3 /* HNFPSTAT_FAM */
244 mov x0, #0xc /* FAM status */
252 ENDPROC(__asm_flush_l3_cache)
/*
 * Start of the secondary-core boot region: spin table storage reserved
 * ahead of the secondary boot code.  NOTE(review): the section/align
 * directives and the __spin_table label that should precede the .space
 * are missing from this extraction (embedded numbering gaps 261->265).
 */
255 /* Keep literals not used by the secondary boot code outside it */
258 /* Using 64 bit alignment since the spin table is accessed as data */
260 .global secondary_boot_code
261 /* Secondary Boot Code starts here */
265 .space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
268 ENTRY(secondary_boot_func)
/* The MPIDR layout description below lost its opening comment marker
 * in this extraction; re-wrapped here. */
/*
271 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
272 * MPIDR[7:2] = AFF0_RES
273 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
274 * MPIDR[23:16] = AFF2_CLUSTERID
276 * MPIDR[29:25] = RES0
279 * MPIDR[39:32] = AFF3
281 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
282 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
283 * until AFF2_CLUSTERID and AFF3 have non-zero values)
285 * LPID = MPIDR[15:8] | MPIDR[1:0]
 */
/* NOTE(review): x1/x2 are consumed here but their producers (the mrs
 * of MPIDR_EL1 and the affinity-field extracts) are not visible in
 * this extraction (embedded numbering jumps 285->290). */
290 orr x10, x2, x1, lsl #2 /* x10 has LPID */
291 ubfm x9, x0, #0, #15 /* x9 contains MPIDR[15:0] */
/* Comment continuation below lost its open/close markers; re-wrapped. */
/*
293 * offset of the spin table element for this core from start of spin
294 * table (each elem is padded to 64 bytes)
 */
297 ldr x0, =__spin_table
298 /* physical address of this cpus spin table element */
301 ldr x0, =__real_cntfrq
303 msr cntfrq_el0, x0 /* set with real frequency */
/* Publish this core's identity and status into its spin-table slot.
 * NOTE(review): x11 (slot address) and x4 producers are not visible. */
304 str x9, [x11, #16] /* LPID */
306 str x4, [x11, #8] /* STATUS */
/* Park until an interrupt arrives (per-GIC wait macro). */
308 #if defined(CONFIG_GICV3)
309 gic_wait_for_interrupt_m x0
310 #elif defined(CONFIG_GICV2)
312 gic_wait_for_interrupt_m x0, w1
315 bl secondary_switch_to_el2
316 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
317 bl secondary_switch_to_el1
/* NOTE(review): several lines are missing between here and the tbz
 * below (embedded numbering jumps 317->324->329), including the
 * cpu_is_le label that the tbz targets. */
324 #ifndef CONFIG_ARMV8_SWITCH_TO_EL1
329 tbz x1, #25, cpu_is_le
330 rev x0, x0 /* BE to LE conversion */
332 br x0 /* branch to the given address */
333 ENDPROC(secondary_boot_func)
335 ENTRY(secondary_switch_to_el2)
/* Dispatch on current EL: from EL3 (case 1f) drop to EL2 via the
 * armv8_switch_to_el2_m macro.  NOTE(review): the "0:" target chosen
 * for EL2/EL1 (embedded line 337, presumably a ret) is missing from
 * this extraction -- confirm against the original source. */
336 switch_el x0, 1f, 0f, 0f
338 1: armv8_switch_to_el2_m x0
339 ENDPROC(secondary_switch_to_el2)
341 ENTRY(secondary_switch_to_el1)
/* Dispatch on current EL: from EL2 (case 1f) drop to EL1 via the
 * armv8_switch_to_el1_m macro.  NOTE(review): the "0:" target chosen
 * for EL3/EL1 (embedded line 343, presumably a ret) is missing from
 * this extraction -- confirm against the original source. */
342 switch_el x0, 0f, 1f, 0f
344 1: armv8_switch_to_el1_m x0, x1
345 ENDPROC(secondary_switch_to_el1)
/*
 * Literal pool and size symbol kept inside the secondary boot code
 * region so a single memreserve can cover it.  NOTE(review): several
 * lines are missing from this extraction (embedded numbering gaps
 * 349->353, 355->357), including the __real_cntfrq: label that should
 * precede the first .quad.  The comment below lost its closing marker;
 * re-closed here.
 */
347 /* Ensure that the literals used by the secondary boot code are
348 * assembled within it (this is required so that we can protect
349 * this area with a single memreserve region
 */
353 /* 64 bit alignment for elements accessed as data */
355 .global __real_cntfrq
357 .quad COUNTER_FREQUENCY
358 .globl __secondary_boot_code_size
359 .type __secondary_boot_code_size, %object
360 /* Secondary Boot Code ends here */
361 __secondary_boot_code_size:
362 .quad .-secondary_boot_code