/*
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <asm-offsets.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>
/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one cache level.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
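 *
 * The dc cisw/isw operand is built the way the ARMv8 ARM describes it:
 * the cache level goes in bits [3:1], the way index is shifted left by
 * clz(#ways - 1) so it lands in the top bits, and the set index is
 * shifted left by log2(cache line size).
 */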
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	and	x3, x3, x6, lsr #3	/* x3 <- maximum way number (ways - 1) */
	clz	w5, w3			/* bit position of #ways */
	and	x4, x4, x6, lsr #13	/* x4 <- maximum set number (sets - 1) */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */
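	/*
	 * The outer loop walks the sets and the inner loop walks the ways,
	 * issuing one dc cisw (or dc isw when x1 == 1) per set/way pair.
	 */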
	mov	x6, x3			/* x6 <- working copy of #ways */
	orr	x9, x12, x7		/* map way and level to cisw value */
	orr	x9, x9, x7		/* map set number to cisw value */
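	/* x9 now holds way << x5 | set << x2 | level << 1 */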
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	subs	x4, x4, #1		/* decrement the set */
ENDPROC(__asm_dcache_level)
/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate the entire data cache by set/way.
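 *
 * __asm_flush_dcache_all and __asm_invalidate_dcache_all below are thin
 * wrappers that load the right value into x0 and branch here.
 */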
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mrs	x10, clidr_el1		/* read clidr_el1 */
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x15 <- return address */
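	/*
	 * clidr_el1 carries a 3-bit cache-type field per level; walk the
	 * levels below the LoC and call __asm_dcache_level for every level
	 * that has a data or unified cache (type >= 2).
	 */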
	add	x12, x12, x0		/* x12 <- tripled cache level */
	and	x12, x12, #7		/* x12 <- cache type */
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
	add	x0, x0, #1		/* increment cache level */
	msr	csselr_el1, x0		/* restore csselr_el1 */
ENDPROC(__asm_dcache_all)
.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
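 *
 * x0: start address
 * x1: end address
 */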
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
ENDPROC(__asm_flush_dcache_range)
/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
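 *
 * x0: start address
 * x1: end address
 */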
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	ubfm	x3, x3, #16, #19	/* x3 <- DminLine: log2(min D-cache line size in words) */
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
1:	dc	ivac, x0	/* invalidate data or unified cache */
ENDPROC(__asm_invalidate_dcache_range)
/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
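 *
 * Uses ic ialluis, so the invalidate is broadcast to the Inner Shareable
 * domain.
 */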
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
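
/*
 * The three routines below are weak no-op defaults that simply report
 * success; platforms with an L3 or system-level cache can override them
 * with implementations that actually maintain that cache.
 */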
.pushsection .text.__asm_invalidate_l3_dcache, "ax"
ENTRY(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
	.weak	__asm_invalidate_l3_dcache
.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
	.weak	__asm_flush_l3_dcache
.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
	.weak	__asm_invalidate_l3_icache
/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
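 *
 * The caches and MMU are first disabled via SCTLR, the TLBs are
 * invalidated, TTBR0 is loaded with the new table, and the saved SCTLR
 * value is restored so the caches come back on.
 */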
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
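	/*
	 * switch_el branches to the 3f/2f/1f label that matches the current
	 * exception level, so the EL3/EL2/EL1 copy of each system register
	 * is accessed below.
	 */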
	switch_el x4, 3f, 2f, 1f

	/* Clear CR_M, CR_C and CR_I in SCTLR to disable the MMU and all caches */
	movn	x1, #(CR_M | CR_C | CR_I)	/* x1 <- ~(CR_M | CR_C | CR_I) */
	switch_el x4, 3f, 2f, 1f

	/* This call only clobbers x30 (lr) and x9 (unused) */
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */
	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
ENDPROC(__asm_switch_ttbr)