+/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Cache-handling routines for MIPS CPUs
*
* Copyright (c) 2003 Wolfgang Denk <wd@denx.de>
- *
- * SPDX-License-Identifier: GPL-2.0+
*/
#include <asm-offsets.h>
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/cacheops.h>
-
-#ifndef CONFIG_SYS_MIPS_CACHE_MODE
-#define CONFIG_SYS_MIPS_CACHE_MODE CONF_CM_CACHABLE_NONCOHERENT
-#endif
-
-#define INDEX_BASE CKSEG0
+#include <asm/cm.h>
.macro f_fill64 dst, offset, val
LONG_S \val, (\offset + 0 * LONGSIZE)(\dst)
10:
.set pop
.endm
+
/*
* mips_cache_reset - low level initialisation of the primary caches
*
 * To initialise the instruction cache it is essential that a source of data
 * with good parity is available. This routine will initialise an area of
 * memory starting at the configured cache index base to be used as a source
 * of parity.
*
+ * Note that this function does not follow the standard calling convention &
+ * may clobber typically callee-saved registers.
+ *
* RETURNS: N/A
*
*/
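+
+/*
+ * Register aliases for values which must survive the whole routine. The
+ * calls to change_k0_cca below clobber ra, & no stack exists this early,
+ * so the probed cache parameters are mostly kept in (normally callee-saved)
+ * s-registers.
+ */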
+#define R_RETURN s0
+#define R_IC_SIZE s1
+#define R_IC_LINE s2
+#define R_DC_SIZE s3
+#define R_DC_LINE s4
+#define R_L2_SIZE s5
+#define R_L2_LINE s6
+#define R_L2_BYPASSED s7
+#define R_L2_L2C t8
LEAF(mips_cache_reset)
-#ifdef CONFIG_SYS_ICACHE_SIZE
- li t2, CONFIG_SYS_ICACHE_SIZE
- li t8, CONFIG_SYS_CACHELINE_SIZE
+ move R_RETURN, ra
+
+#ifdef CONFIG_MIPS_L2_CACHE
+ /*
+	 * For an L2 cache to be present, Config2 must be present. If it isn't
+ * then we proceed knowing there's no L2 cache.
+ */
+ move R_L2_SIZE, zero
+ move R_L2_LINE, zero
+ move R_L2_BYPASSED, zero
+ move R_L2_L2C, zero
+ mfc0 t0, CP0_CONFIG, 1
+ bgez t0, l2_probe_done
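+
+	/*
+	 * Bit 31 of each Config register is the M bit, indicating that the
+	 * next Config register is present - hence the bgez (sign bit clear)
+	 * checks here & below.
+	 */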
+
+ /*
+ * From MIPSr6 onwards the L2 cache configuration might not be reported
+ * by Config2. The Config5.L2C bit indicates whether this is the case,
+ * and if it is then we need knowledge of where else to look. For cores
+ * from Imagination Technologies this is a CM GCR.
+ */
+# if __mips_isa_rev >= 6
+ /* Check that Config5 exists */
+ mfc0 t0, CP0_CONFIG, 2
+ bgez t0, l2_probe_cop0
+ mfc0 t0, CP0_CONFIG, 3
+ bgez t0, l2_probe_cop0
+ mfc0 t0, CP0_CONFIG, 4
+ bgez t0, l2_probe_cop0
+
+ /* Check Config5.L2C is set */
+ mfc0 t0, CP0_CONFIG, 5
+ and R_L2_L2C, t0, MIPS_CONF5_L2C
+ beqz R_L2_L2C, l2_probe_cop0
+
+ /* Config5.L2C is set */
+# ifdef CONFIG_MIPS_CM
+ /* The CM will provide L2 configuration */
+ PTR_LI t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
+ lw t1, GCR_L2_CONFIG(t0)
+ bgez t1, l2_probe_done
+
+ ext R_L2_LINE, t1, \
+ GCR_L2_CONFIG_LINESZ_SHIFT, GCR_L2_CONFIG_LINESZ_BITS
+ beqz R_L2_LINE, l2_probe_done
+ li t2, 2
+ sllv R_L2_LINE, t2, R_L2_LINE
+
+ ext t2, t1, GCR_L2_CONFIG_ASSOC_SHIFT, GCR_L2_CONFIG_ASSOC_BITS
+ addiu t2, t2, 1
+ mul R_L2_SIZE, R_L2_LINE, t2
+
+ ext t2, t1, GCR_L2_CONFIG_SETSZ_SHIFT, GCR_L2_CONFIG_SETSZ_BITS
+ sllv R_L2_SIZE, R_L2_SIZE, t2
+ li t2, 64
+ mul R_L2_SIZE, R_L2_SIZE, t2
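+
+	/*
+	 * In total: R_L2_SIZE = (2 << LINESZ) * (ASSOC + 1) * (64 << SETSZ),
+	 * i.e. line size * associativity * sets per way.
+	 */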
+
+ /* Bypass the L2 cache so that we can init the L1s early */
+ or t1, t1, GCR_L2_CONFIG_BYPASS
+ sw t1, GCR_L2_CONFIG(t0)
+ sync
+ li R_L2_BYPASSED, 1
+
+ /* Zero the L2 tag registers */
+ sw zero, GCR_L2_TAG_ADDR(t0)
+ sw zero, GCR_L2_TAG_ADDR_UPPER(t0)
+ sw zero, GCR_L2_TAG_STATE(t0)
+ sw zero, GCR_L2_TAG_STATE_UPPER(t0)
+ sw zero, GCR_L2_DATA(t0)
+ sw zero, GCR_L2_DATA_UPPER(t0)
+ sync
+# else
+ /* We don't know how to retrieve L2 configuration on this system */
+# endif
+ b l2_probe_done
+# endif
+
+ /*
+ * For pre-r6 systems, or r6 systems with Config5.L2C==0, probe the L2
+ * cache configuration from the cop0 Config2 register.
+ */
+l2_probe_cop0:
+ mfc0 t0, CP0_CONFIG, 2
+
+ srl R_L2_LINE, t0, MIPS_CONF2_SL_SHF
+ andi R_L2_LINE, R_L2_LINE, MIPS_CONF2_SL >> MIPS_CONF2_SL_SHF
+ beqz R_L2_LINE, l2_probe_done
+ li t1, 2
+ sllv R_L2_LINE, t1, R_L2_LINE
+
+ srl t1, t0, MIPS_CONF2_SA_SHF
+ andi t1, t1, MIPS_CONF2_SA >> MIPS_CONF2_SA_SHF
+ addiu t1, t1, 1
+ mul R_L2_SIZE, R_L2_LINE, t1
+
+ srl t1, t0, MIPS_CONF2_SS_SHF
+ andi t1, t1, MIPS_CONF2_SS >> MIPS_CONF2_SS_SHF
+ sllv R_L2_SIZE, R_L2_SIZE, t1
+ li t1, 64
+ mul R_L2_SIZE, R_L2_SIZE, t1
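+
+	/*
+	 * In total: R_L2_SIZE = (2 << SL) * (SA + 1) * (64 << SS) - the
+	 * architected Config2 encodings of line size, associativity & sets
+	 * per way.
+	 */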
+
+ /* Attempt to bypass the L2 so that we can init the L1s early */
+ or t0, t0, MIPS_CONF2_L2B
+ mtc0 t0, CP0_CONFIG, 2
+ ehb
+ mfc0 t0, CP0_CONFIG, 2
+ and R_L2_BYPASSED, t0, MIPS_CONF2_L2B
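+
+	/*
+	 * L2B is optional, so the read-back above means R_L2_BYPASSED is only
+	 * non-zero if the bypass request actually took effect.
+	 */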
+
+ /* Zero the L2 tag registers */
+ mtc0 zero, CP0_TAGLO, 4
+ ehb
+l2_probe_done:
+#endif
+
+#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
+ li R_IC_SIZE, CONFIG_SYS_ICACHE_SIZE
+ li R_IC_LINE, CONFIG_SYS_ICACHE_LINE_SIZE
#else
- l1_info t2, t8, MIPS_CONF1_IA_SHF
+ l1_info R_IC_SIZE, R_IC_LINE, MIPS_CONF1_IA_SHF
#endif
-#ifdef CONFIG_SYS_DCACHE_SIZE
- li t3, CONFIG_SYS_DCACHE_SIZE
- li t9, CONFIG_SYS_CACHELINE_SIZE
+#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
+ li R_DC_SIZE, CONFIG_SYS_DCACHE_SIZE
+ li R_DC_LINE, CONFIG_SYS_DCACHE_LINE_SIZE
#else
- l1_info t3, t9, MIPS_CONF1_DA_SHF
+ l1_info R_DC_SIZE, R_DC_LINE, MIPS_CONF1_DA_SHF
#endif
#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
/* Determine the largest L1 cache size */
-#if defined(CONFIG_SYS_ICACHE_SIZE) && defined(CONFIG_SYS_DCACHE_SIZE)
+#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
#if CONFIG_SYS_ICACHE_SIZE > CONFIG_SYS_DCACHE_SIZE
li v0, CONFIG_SYS_ICACHE_SIZE
#else
li v0, CONFIG_SYS_DCACHE_SIZE
#endif
#else
- move v0, t2
- sltu t1, t2, t3
- movn v0, t3, t1
+ move v0, R_IC_SIZE
+ sltu t1, R_IC_SIZE, R_DC_SIZE
+ movn v0, R_DC_SIZE, t1
#endif
/*
* Now clear that much memory starting from zero.
*/
- PTR_LI a0, CKSEG1
+ PTR_LI a0, CKSEG1ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
PTR_ADDU a1, a0, v0
2: PTR_ADDIU a0, 64
	f_fill64 a0, -64, zero
	bne	a0, a1, 2b
#endif /* CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD */
+#ifdef CONFIG_MIPS_L2_CACHE
+ /*
+ * If the L2 is bypassed, init the L1 first so that we can execute the
+ * rest of the cache initialisation using the L1 instruction cache.
+ */
+ bnez R_L2_BYPASSED, l1_init
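+
+	/*
+	 * Control flow from here: if the L2 is bypassed we run l1_init now,
+	 * branch back to l2_init afterwards & finish in l2_unbypass;
+	 * otherwise we fall through l2_init first & then run l1_init through
+	 * to the return.
+	 */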
+
+l2_init:
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+ PTR_ADDU t1, t0, R_L2_SIZE
+1: cache INDEX_STORE_TAG_SD, 0(t0)
+ PTR_ADDU t0, t0, R_L2_LINE
+ bne t0, t1, 1b
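+
+	/*
+	 * R_L2_SIZE covers every way, & index-type cache ops select both set
+	 * & way from the address, so stepping a line at a time through this
+	 * range writes the (zeroed) tag of every line in every way.
+	 */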
+
+ /*
+	 * If the L2 was bypassed then we already initialised the L1s before
+	 * the L2, so all that remains is to stop bypassing it.
+ */
+ bnez R_L2_BYPASSED, l2_unbypass
+#endif
+
/*
* The TagLo registers used depend upon the CPU implementation, but the
* architecture requires that it is safe for software to write to both
* TagLo selects 0 & 2 covering supported cases.
*/
+l1_init:
mtc0 zero, CP0_TAGLO
mtc0 zero, CP0_TAGLO, 2
+ ehb
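+	/*
+	 * The ehb ensures the TagLo writes above have completed before the
+	 * index store tag operations below consume them.
+	 */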
/*
	 * The caches are probably in an indeterminate state, so we force good
	 * parity into them by invalidating each line.
	 */
/*
	 * Initialize the I-cache first.
*/
- blez t2, 1f
- PTR_LI t0, INDEX_BASE
- PTR_ADDU t1, t0, t2
+ blez R_IC_SIZE, 1f
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+ PTR_ADDU t1, t0, R_IC_SIZE
/* clear tag to invalidate */
- cache_loop t0, t1, t8, INDEX_STORE_TAG_I
+ cache_loop t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
/* fill once, so data field parity is correct */
- PTR_LI t0, INDEX_BASE
- cache_loop t0, t1, t8, FILL
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+ cache_loop t0, t1, R_IC_LINE, FILL
	/* invalidate again - prudent but not strictly necessary */
- PTR_LI t0, INDEX_BASE
- cache_loop t0, t1, t8, INDEX_STORE_TAG_I
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+ cache_loop t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
#endif
+ sync
+
+ /*
+ * Enable use of the I-cache by setting Config.K0. The code for this
+ * must be executed from KSEG1. Jump from KSEG0 to KSEG1 to do this.
+ * Jump back to KSEG0 after caches are enabled and insert an
+ * instruction hazard barrier.
+ */
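+	/*
+	 * CPHYSADDR(~0) masks down to the physical address bits, so the
+	 * and/or sequence below rebases change_k0_cca's address into KSEG1.
+	 */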
+ PTR_LA t0, change_k0_cca
+ li t1, CPHYSADDR(~0)
+ and t0, t0, t1
+ PTR_LI t1, CKSEG1
+ or t0, t0, t1
+ li a0, CONF_CM_CACHABLE_NONCOHERENT
+ jalr.hb t0
/*
	 * Then initialize the D-cache.
*/
-1: blez t3, 3f
- PTR_LI t0, INDEX_BASE
- PTR_ADDU t1, t0, t3
+1: blez R_DC_SIZE, 3f
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+ PTR_ADDU t1, t0, R_DC_SIZE
/* clear all tags */
- cache_loop t0, t1, t9, INDEX_STORE_TAG_D
+ cache_loop t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
/* load from each line (in cached space) */
- PTR_LI t0, INDEX_BASE
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
2: LONG_L zero, 0(t0)
- PTR_ADDU t0, t9
+ PTR_ADDU t0, R_DC_LINE
bne t0, t1, 2b
/* clear all tags */
- PTR_LI t0, INDEX_BASE
- cache_loop t0, t1, t9, INDEX_STORE_TAG_D
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+ cache_loop t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
#endif
+3:
-3: jr ra
- END(mips_cache_reset)
+#ifdef CONFIG_MIPS_L2_CACHE
+ /* If the L2 isn't bypassed then we're done */
+ beqz R_L2_BYPASSED, return
-/*
- * dcache_status - get cache status
- *
- * RETURNS: 0 - cache disabled; 1 - cache enabled
- *
- */
-LEAF(dcache_status)
- mfc0 t0, CP0_CONFIG
- li t1, CONF_CM_UNCACHED
- andi t0, t0, CONF_CM_CMASK
- move v0, zero
- beq t0, t1, 2f
- li v0, 1
-2: jr ra
- END(dcache_status)
+ /* The L2 is bypassed - go initialise it */
+ b l2_init
-/*
- * dcache_disable - disable cache
- *
- * RETURNS: N/A
- *
- */
-LEAF(dcache_disable)
- mfc0 t0, CP0_CONFIG
- li t1, -8
- and t0, t0, t1
- ori t0, t0, CONF_CM_UNCACHED
- mtc0 t0, CP0_CONFIG
- jr ra
- END(dcache_disable)
+l2_unbypass:
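+	/*
+	 * We can only arrive here with the bypass engaged, so toggling the
+	 * bypass bit with xor below is sufficient to clear it.
+	 */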
+# if __mips_isa_rev >= 6
+ beqz R_L2_L2C, 1f
-/*
- * dcache_enable - enable cache
- *
- * RETURNS: N/A
- *
- */
-LEAF(dcache_enable)
- mfc0 t0, CP0_CONFIG
- ori t0, CONF_CM_CMASK
- xori t0, CONF_CM_CMASK
- ori t0, CONFIG_SYS_MIPS_CACHE_MODE
- mtc0 t0, CP0_CONFIG
- jr ra
- END(dcache_enable)
+	PTR_LI	t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
+ lw t1, GCR_L2_CONFIG(t0)
+ xor t1, t1, GCR_L2_CONFIG_BYPASS
+ sw t1, GCR_L2_CONFIG(t0)
+ sync
+ ehb
+ b 2f
+# endif
+1: mfc0 t0, CP0_CONFIG, 2
+ xor t0, t0, MIPS_CONF2_L2B
+ mtc0 t0, CP0_CONFIG, 2
+ ehb
+
+2:
+# ifdef CONFIG_MIPS_CM
+ /* Config3 must exist for a CM to be present */
+ mfc0 t0, CP0_CONFIG, 1
+ bgez t0, 2f
+ mfc0 t0, CP0_CONFIG, 2
+ bgez t0, 2f
+
+ /* Check Config3.CMGCR to determine CM presence */
+ mfc0 t0, CP0_CONFIG, 3
+ and t0, t0, MIPS_CONF3_CMGCR
+ beqz t0, 2f
+
+ /* Change Config.K0 to a coherent CCA */
+ PTR_LA t0, change_k0_cca
+ li a0, CONF_CM_CACHABLE_COW
+ jalr t0
+
+ /*
+ * Join the coherent domain such that the caches of this core are kept
+ * coherent with those of other cores.
+ */
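+	/*
+	 * CM 3 onwards exposes a single coherence enable bit, whilst earlier
+	 * CMs take a bitmask of coherence domains - hence the revision check.
+	 */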
+ PTR_LI t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
+ lw t1, GCR_REV(t0)
+ li t2, GCR_REV_CM3
+ li t3, GCR_Cx_COHERENCE_EN
+ bge t1, t2, 1f
+ li t3, GCR_Cx_COHERENCE_DOM_EN
+1: sw t3, GCR_Cx_COHERENCE(t0)
+ ehb
+2:
+# endif
+#endif
+
+return:
+ /* Ensure all cache operations complete before returning */
+ sync
+ jr R_RETURN
+ END(mips_cache_reset)
+
+LEAF(change_k0_cca)
+ mfc0 t0, CP0_CONFIG
+#if __mips_isa_rev >= 2
+	ins	t0, a0, 0, 3
+	move	a0, t0
+#else
+	/* replace the low 3 (CCA) bits of Config with those of a0 */
+	xor	a0, a0, t0
+	andi	a0, a0, CONF_CM_CMASK
+	xor	a0, a0, t0
+#endif
+ mtc0 a0, CP0_CONFIG
+
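+	/*
+	 * The hazard barrier of jr.hb ensures the Config write above has
+	 * taken effect before the caller continues executing.
+	 */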
+ jr.hb ra
+ END(change_k0_cca)