/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 * Extracted from armv8/start.S
 */
10 #include <linux/linkage.h>
12 #include <asm/macro.h>
14 #include <asm/arch/mp.h>
/*
 * lowlevel_init: early secure-world SoC initialization, run once on the
 * boot core before relocation. Configures the CCN-504 coherent
 * interconnect (LSCH3 only), SMMU page size, GIC secure state, secondary
 * core entry, and TZPC/TZASC security controllers.
 *
 * Uses x29 to preserve LR across the nested `bl` calls.
 *
 * NOTE(review): the residual line numbers embedded in this extraction are
 * non-contiguous, so instructions between the visible ones are missing
 * (e.g. the ENTRY(lowlevel_init) label, the `str` write-backs that follow
 * each `ldr x0, =...` address load, and a nested #if around the GIC
 * block). Do not treat this chunk as assemblable as-is; verify against
 * the full fsl-layerscape lowlevel.S.
 */
18 mov x29, lr /* Save LR */
20 #ifdef CONFIG_FSL_LSCH3
/* --- CCN-504 interconnect setup (LSCH3 SoCs only) --- */
22 /* Set Wuo bit for RN-I 20 */
24 ldr x0, =CCI_AUX_CONTROL_BASE(20)
29 /* Add fully-coherent masters to DVM domain */
31 ldr x1, =CCI_MN_RNF_NODEID_LIST
32 ldr x2, =CCI_MN_DVM_DOMAIN_CTL_SET
33 bl ccn504_add_masters_to_dvm
/*
 * Per-port QoS programming for RN-I nodes 0, 2, 6, 12, 16 and 20.
 * NOTE(review): only the address loads are visible here; the value
 * setup/store between each `ldr` was dropped by the extraction.
 */
35 /* Set all RN-I ports to QoS of 15 */
36 ldr x0, =CCI_S0_QOS_CONTROL_BASE(0)
39 ldr x0, =CCI_S1_QOS_CONTROL_BASE(0)
42 ldr x0, =CCI_S2_QOS_CONTROL_BASE(0)
46 ldr x0, =CCI_S0_QOS_CONTROL_BASE(2)
49 ldr x0, =CCI_S1_QOS_CONTROL_BASE(2)
52 ldr x0, =CCI_S2_QOS_CONTROL_BASE(2)
56 ldr x0, =CCI_S0_QOS_CONTROL_BASE(6)
59 ldr x0, =CCI_S1_QOS_CONTROL_BASE(6)
62 ldr x0, =CCI_S2_QOS_CONTROL_BASE(6)
66 ldr x0, =CCI_S0_QOS_CONTROL_BASE(12)
69 ldr x0, =CCI_S1_QOS_CONTROL_BASE(12)
72 ldr x0, =CCI_S2_QOS_CONTROL_BASE(12)
76 ldr x0, =CCI_S0_QOS_CONTROL_BASE(16)
79 ldr x0, =CCI_S1_QOS_CONTROL_BASE(16)
82 ldr x0, =CCI_S2_QOS_CONTROL_BASE(16)
86 ldr x0, =CCI_S0_QOS_CONTROL_BASE(20)
89 ldr x0, =CCI_S1_QOS_CONTROL_BASE(20)
92 ldr x0, =CCI_S2_QOS_CONTROL_BASE(20)
98 /* Set the SMMU page size in the sACR register */
101 orr w0, w0, #1 << 16 /* set sACR.pagesize to indicate 64K page */
/*
 * --- GIC secure-bank init ---
 * NOTE(review): the #elif below is unreachable as shown because GICV2 is
 * already covered by the #if; in the full source this is a nested
 * `#if defined(CONFIG_GICV3)` whose line was dropped by the extraction.
 */
105 /* Initialize GIC Secure Bank Status */
106 #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
107 branch_if_slave x0, 1f
113 bl gic_init_secure_percpu
114 #elif defined(CONFIG_GICV2)
117 bl gic_init_secure_percpu
/* Secondary (slave) cores are parked in secondary_boot_func */
121 branch_if_master x0, x1, 2f
123 #if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
124 ldr x0, =secondary_boot_func
/* --- TZPC: open selected peripherals to the Normal world --- */
129 #ifdef CONFIG_FSL_TZPC_BP147
130 /* Set Non Secure access for all devices protected via TZPC */
131 ldr x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
132 orr w0, w0, #1 << 3 /* DCFG_RESET is accessible from NS world */
/* --- TZASC: open gate keepers and Region 0 to secure + NS masters --- */
139 #ifdef CONFIG_FSL_TZASC_400
140 /* Set TZASC so that:
141 * a. We use only Region0 whose global secure write/read is EN
142 * b. We use only Region0 whose NSAID write/read is EN
144 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
147 ldr x1, =TZASC_GATE_KEEPER(0)
148 ldr x0, [x1] /* Filter 0 Gate Keeper Register */
149 orr x0, x0, #1 << 0 /* Set open_request for Filter 0 */
152 ldr x1, =TZASC_GATE_KEEPER(1)
153 ldr x0, [x1] /* Filter 1 Gate Keeper Register */
154 orr x0, x0, #1 << 0 /* Set open_request for Filter 1 */
157 ldr x1, =TZASC_REGION_ATTRIBUTES_0(0)
158 ldr x0, [x1] /* Region-0 Attributes Register */
159 orr x0, x0, #1 << 31 /* Set Sec global write en, Bit[31] */
160 orr x0, x0, #1 << 30 /* Set Sec global read en, Bit[30] */
163 ldr x1, =TZASC_REGION_ATTRIBUTES_0(1)
164 ldr x0, [x1] /* Region-1 Attributes Register */
165 orr x0, x0, #1 << 31 /* Set Sec global write en, Bit[31] */
166 orr x0, x0, #1 << 30 /* Set Sec global read en, Bit[30] */
169 ldr x1, =TZASC_REGION_ID_ACCESS_0(0)
/* NOTE(review): this load of w0 is dead — overwritten by the mov below */
170 ldr w0, [x1] /* Region-0 Access Register */
171 mov w0, #0xFFFFFFFF /* Set nsaid_wr_en and nsaid_rd_en */
174 ldr x1, =TZASC_REGION_ID_ACCESS_0(1)
/* NOTE(review): dead load of w0, as above */
175 ldr w0, [x1] /* Region-1 Access Register */
176 mov w0, #0xFFFFFFFF /* Set nsaid_wr_en and nsaid_rd_en */
182 mov lr, x29 /* Restore LR */
184 ENDPROC(lowlevel_init)
76 #ifdef CONFIG_FSL_LSCH3
/*
 * HN-F pstate poll helper (label not visible in this extraction —
 * presumably hnf_pstate_poll; verify against the full source).
 *
 * Polls the PSTATE_STATUS register of each of the 8 CCN-504 HN-F nodes
 * (node stride 0x10000, first node at base | 0x420 << 16) until the
 * status in x1-derived form matches the desired value in x0, or the
 * generic counter passes a deadline of now + 1200 ticks
 * (~100 microseconds per the original comment).
 *
 * In:  x0 = desired status
 * Out: 0 for success, 1 for timeout (via x7 flag)
 * Clobbers x1, x2, x3, x4, x6, x7.
 *
 * NOTE(review): the entry label, the status load into x2, and the
 * branch/return instructions are missing from this extraction.
 */
188 /* x0 has the desired status, return 0 for success, 1 for timeout
189 * clobber x1, x2, x3, x4, x6, x7
192 mov x7, #0 /* flag for timeout */
193 mrs x3, cntpct_el0 /* read timer */
194 add x3, x3, #1200 /* timeout after 100 microseconds */
196 movk x0, #0x420, lsl #16 /* HNF0_PSTATE_STATUS */
197 mov w6, #8 /* HN-F node count */
200 cmp x2, x1 /* check status */
205 mov x7, #1 /* timeout */
208 add x0, x0, #0x10000 /* move to next node */
/*
 * HN-F pstate request helper (label not visible in this extraction —
 * presumably hnf_set_pstate; verify against the full source).
 *
 * Writes the requested power state (x0 on entry) into the PSTATE_REQ
 * register of each of the 8 HN-F nodes, masking the current request
 * field with ~0x3 (HNFPSTAT_MASK) before merging the new state.
 *
 * In:  x0 = desired pstate
 * Clobbers x1, x2, x6.
 *
 * NOTE(review): the load/store of the request register and the loop
 * branch are missing from this extraction.
 */
216 /* x0 has the desired state, clobber x1, x2, x6 */
218 /* power state to SFONLY */
219 mov w6, #8 /* HN-F node count */
221 movk x0, #0x420, lsl #16 /* HNF0_PSTATE_REQ */
222 1: /* set pstate to sfonly */
224 and x2, x2, #0xfffffffffffffffc /* & HNFPSTAT_MASK */
227 add x0, x0, #0x10000 /* move to next node */
233 ENTRY(__asm_flush_l3_cache)
/*
 * Flush the CCN-504 L3 (HN-F) caches by stepping the HN-F nodes through
 * power states: first request SFONLY (snoop-filter only, pstate 0x1,
 * expected status 0x4), then FAM (full/normal, pstate 0x3, expected
 * status 0xc). x8 accumulates the timeout result.
 *
 * Return status in x0:
 *   timeout 1 for setting SFONLY, 2 for FAM, 3 for both.
 *
 * NOTE(review): the calls into the set/poll helpers and the final
 * result move/ret are missing from this extraction.
 */
235 * Return status in x0
237 * tmeout 1 for setting SFONLY, 2 for FAM, 3 for both
243 mov x0, #0x1 /* HNFPSTAT_SFONLY */
246 mov x0, #0x4 /* SFONLY status */
249 mov x8, #1 /* timeout */
252 mov x0, #0x3 /* HNFPSTAT_FAM */
255 mov x0, #0xc /* FAM status */
263 ENDPROC(__asm_flush_l3_cache)
/*
 * Start of the relocatable secondary-boot region. The spin table that
 * secondary cores poll is reserved here as data, sized to
 * CONFIG_MAX_CPUS entries of SPIN_TABLE_ELEM_SIZE bytes each.
 * NOTE(review): the .align directives and the __spin_table label itself
 * are missing from this extraction.
 */
267 /* Keep literals not used by the secondary boot code outside it */
270 /* Using 64 bit alignment since the spin table is accessed as data */
272 .global secondary_boot_code
273 /* Secondary Boot Code starts here */
277 .space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
280 ENTRY(secondary_boot_func)
/*
 * Entry point for secondary cores released from reset. Computes a
 * Linear Processor ID (LPID) from MPIDR_EL1, locates this core's spin
 * table element, publishes its LPID/STATUS, sets the real counter
 * frequency, then parks in WFI until the master signals a jump address.
 * On release it optionally drops to EL2/EL1 and branches to the given
 * address (with a BE-to-LE fixup when SCTLR indicates big-endian).
 *
 * NOTE(review): the mrs of MPIDR_EL1 into x0, the spin-table address
 * arithmetic into x11, the jump-address load/poll loop, and several
 * labels are missing from this extraction — verify against the full
 * source before modifying.
 */
283 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
284 * MPIDR[7:2] = AFF0_RES
285 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
286 * MPIDR[23:16] = AFF2_CLUSTERID
288 * MPIDR[29:25] = RES0
291 * MPIDR[39:32] = AFF3
293 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
294 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
295 * until AFF2_CLUSTERID and AFF3 have non-zero values)
297 * LPID = MPIDR[15:8] | MPIDR[1:0]
/* LPID = cluster_id * 4 + cpu_id (AFF1 shifted over AFF0) */
302 orr x10, x2, x1, lsl #2 /* x10 has LPID */
303 ubfm x9, x0, #0, #15 /* x9 contains MPIDR[15:0] */
305 * offset of the spin table element for this core from start of spin
306 * table (each elem is padded to 64 bytes)
309 ldr x0, =__spin_table
310 /* physical address of this cpus spin table element */
/* Publish per-core info into the spin table element at x11 */
313 ldr x0, =__real_cntfrq
315 msr cntfrq_el0, x0 /* set with real frequency */
316 str x9, [x11, #16] /* LPID */
318 str x4, [x11, #8] /* STATUS */
/* Park until woken; GIC flavor decides the wait macro arguments */
320 #if defined(CONFIG_GICV3)
321 gic_wait_for_interrupt_m x0
322 #elif defined(CONFIG_GICV2)
324 gic_wait_for_interrupt_m x0, w1
/* Drop exception level before jumping to the OS entry point */
327 bl secondary_switch_to_el2
328 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
329 bl secondary_switch_to_el1
336 #ifndef CONFIG_ARMV8_SWITCH_TO_EL1
/* SCTLR bit 25 (EE) set means we are big-endian; byte-swap the target */
341 tbz x1, #25, cpu_is_le
342 rev x0, x0 /* BE to LE conversion */
344 br x0 /* branch to the given address */
345 ENDPROC(secondary_boot_func)
347 ENTRY(secondary_switch_to_el2)
/*
 * Switch a secondary core from EL3 to EL2 using the shared
 * armv8_switch_to_el2_m macro; the switch_el dispatch falls through
 * (label 0, line not visible here) when already below EL3.
 */
348 switch_el x0, 1f, 0f, 0f
350 1: armv8_switch_to_el2_m x0
351 ENDPROC(secondary_switch_to_el2)
353 ENTRY(secondary_switch_to_el1)
/*
 * Switch a secondary core from EL2 to EL1 using the shared
 * armv8_switch_to_el1_m macro; only the EL2 arm (label 1) performs the
 * switch, other levels fall through (label 0, line not visible here).
 */
354 switch_el x0, 0f, 1f, 0f
356 1: armv8_switch_to_el1_m x0, x1
357 ENDPROC(secondary_switch_to_el1)
/*
 * Trailing data of the secondary boot region: literals kept inside the
 * region so one memreserve covers code + data, the real counter
 * frequency consumed by secondary_boot_func, and the region's total
 * size symbol (current location minus secondary_boot_code).
 */
359 /* Ensure that the literals used by the secondary boot code are
360 * assembled within it (this is required so that we can protect
361 * this area with a single memreserve region
365 /* 64 bit alignment for elements accessed as data */
367 .global __real_cntfrq
/* Initialized to the build-time COUNTER_FREQUENCY; read by secondaries */
369 .quad COUNTER_FREQUENCY
370 .globl __secondary_boot_code_size
371 .type __secondary_boot_code_size, %object
372 /* Secondary Boot Code ends here */
373 __secondary_boot_code_size:
374 .quad .-secondary_boot_code