/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
9 #include <asm/system.h>
11 #include <asm/arch-fsl-lsch3/immap_lsch3.h>
/* Provide access to U-Boot's global data pointer (gd); used below for
 * gd->relocaddr, the relocated U-Boot entry point for secondary cores.
 */
DECLARE_GLOBAL_DATA_PTR;
/*
 * Return the base address of the secondary-core spin table.
 * NOTE(review): the function body is not visible in this chunk
 * (truncated by extraction) — presumably it returns the address of a
 * linker/asm-defined spin-table symbol; confirm against the full file.
 */
void *get_spin_tbl_addr(void)
21 phys_addr_t determine_mp_bootpg(void)
23 return (phys_addr_t)&secondary_boot_code;
26 int fsl_lsch3_wake_seconday_cores(void)
28 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
29 struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
30 u32 cores, cpu_up_mask = 1;
32 u64 *table = get_spin_tbl_addr();
35 /* Clear spin table so that secondary processors
36 * observe the correct value after waking up from wfe.
38 memset(table, 0, CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE);
39 flush_dcache_range((unsigned long)table,
40 (unsigned long)table +
41 (CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE));
43 printf("Waking secondary cores to start from %lx\n", gd->relocaddr);
44 out_le32(&gur->bootlocptrh, (u32)(gd->relocaddr >> 32));
45 out_le32(&gur->bootlocptrl, (u32)gd->relocaddr);
46 out_le32(&gur->scratchrw[6], 1);
47 asm volatile("dsb st" : : : "memory");
49 asm volatile("dsb st" : : : "memory");
51 /* This is needed as a precautionary measure.
52 * If some code before this has accidentally released the secondary
53 * cores then the pre-bootloader code will trap them in a "wfe" unless
54 * the scratchrw[6] is set. In this case we need a sev here to get these
60 flush_dcache_range((unsigned long)table, (unsigned long)table +
61 CONFIG_MAX_CPUS * 64);
62 for (i = 1; i < CONFIG_MAX_CPUS; i++) {
63 if (table[i * WORDS_PER_SPIN_TABLE_ENTRY +
64 SPIN_TABLE_ELEM_STATUS_IDX])
65 cpu_up_mask |= 1 << i;
67 if (hweight32(cpu_up_mask) == hweight32(cores))
72 printf("Not all cores (0x%x) are up (0x%x)\n",
76 printf("All (%d) cores are up.\n", hweight32(cores));
/*
 * Return non-zero (1) when @core's bit is set in the SoC's present-core
 * mask (cpu_mask()), 0 otherwise.
 */
int is_core_valid(unsigned int core)
{
	return !!((1 << core) & cpu_mask());
}
86 int is_core_online(u64 cpu_id)
89 int pos = id_to_core(cpu_id);
90 table = (u64 *)get_spin_tbl_addr() + pos * WORDS_PER_SPIN_TABLE_ENTRY;
91 return table[SPIN_TABLE_ELEM_STATUS_IDX] == 1;
/*
 * Reset core @nr. Not supported on this platform; prints a notice and
 * reports success so callers do not treat it as an error.
 *
 * NOTE(review): the enclosing signature is not visible in this chunk;
 * reconstructed as cpu_reset(int nr) from its position between
 * is_core_online() and cpu_disable() — confirm against the full file.
 */
int cpu_reset(int nr)
{
	puts("Feature is not implemented.\n");

	return 0;
}
/*
 * Disable core @nr. Not supported on this platform; prints a notice
 * and reports success so callers do not treat it as an error.
 */
int cpu_disable(int nr)
{
	puts("Feature is not implemented.\n");

	return 0;
}
108 int core_to_pos(int nr)
110 u32 cores = cpu_mask();
115 } else if (nr >= hweight32(cores)) {
116 puts("Not a valid core number.\n");
120 for (i = 1; i < 32; i++) {
121 if (is_core_valid(i)) {
131 int cpu_status(int nr)
137 table = (u64 *)get_spin_tbl_addr();
138 printf("table base @ 0x%p\n", table);
140 pos = core_to_pos(nr);
143 table = (u64 *)get_spin_tbl_addr() + pos *
144 WORDS_PER_SPIN_TABLE_ENTRY;
145 printf("table @ 0x%p\n", table);
146 printf(" addr - 0x%016llx\n",
147 table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]);
148 printf(" status - 0x%016llx\n",
149 table[SPIN_TABLE_ELEM_STATUS_IDX]);
150 printf(" lpid - 0x%016llx\n",
151 table[SPIN_TABLE_ELEM_LPID_IDX]);
157 int cpu_release(int nr, int argc, char * const argv[])
160 u64 *table = (u64 *)get_spin_tbl_addr();
163 pos = core_to_pos(nr);
167 table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
168 boot_addr = simple_strtoull(argv[0], NULL, 16);
169 table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX] = boot_addr;
170 flush_dcache_range((unsigned long)table,
171 (unsigned long)table + SPIN_TABLE_ELEM_SIZE);
172 asm volatile("dsb st");
173 smp_kick_all_cpus(); /* only those with entry addr set will run */