#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H
+#include <common.h>
+#include <linux/compiler.h>
+
+#ifdef CONFIG_ARM64
+
+/*
+ * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bits definitions
+ */
+#define CR_M (1 << 0) /* MMU enable */
+#define CR_A (1 << 1) /* Alignment abort enable */
+#define CR_C (1 << 2) /* Dcache enable */
+#define CR_SA (1 << 3) /* Stack Alignment Check Enable */
+#define CR_I (1 << 12) /* Icache enable */
+#define CR_WXN (1 << 19) /* Write permission implies XN */
+#define CR_EE (1 << 25) /* Exception (Big) Endian */
+
+#ifndef __ASSEMBLY__
+
+u64 get_page_table_size(void);
+#define PGTABLE_SIZE get_page_table_size()
+
+/* 2MB granularity */
+#define MMU_SECTION_SHIFT 21
+#define MMU_SECTION_SIZE (1 << MMU_SECTION_SHIFT)
+
+/* These constants need to be synced to the MT_ types in asm/armv8/mmu.h */
+enum dcache_option {
+ DCACHE_OFF = 0 << 2,
+ DCACHE_WRITETHROUGH = 3 << 2,
+ DCACHE_WRITEBACK = 4 << 2,
+ DCACHE_WRITEALLOC = 4 << 2,
+};
+
+#define isb() \
+ ({asm volatile( \
+ "isb" : : : "memory"); \
+ })
+
+#define wfi() \
+ ({asm volatile( \
+ "wfi" : : : "memory"); \
+ })
+
+static inline unsigned int current_el(void)
+{
+ unsigned int el;
+ asm volatile("mrs %0, CurrentEL" : "=r" (el) : : "cc");
+ return el >> 2;
+}
+
+static inline unsigned int get_sctlr(void)
+{
+ unsigned int el, val;
+
+ el = current_el();
+ if (el == 1)
+ asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
+ else if (el == 2)
+ asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
+ else
+ asm volatile("mrs %0, sctlr_el3" : "=r" (val) : : "cc");
+
+ return val;
+}
+
+static inline void set_sctlr(unsigned int val)
+{
+ unsigned int el;
+
+ el = current_el();
+ if (el == 1)
+ asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
+ else if (el == 2)
+ asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
+ else
+ asm volatile("msr sctlr_el3, %0" : : "r" (val) : "cc");
+
+ asm volatile("isb");
+}
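+
+/*
+ * Illustrative sketch only, not part of this interface: the CR_* bit
+ * definitions above are intended to be combined with these accessors,
+ * e.g. enabling the I-cache at the current exception level:
+ *
+ *   set_sctlr(get_sctlr() | CR_I);
+ */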
+
+static inline unsigned long read_mpidr(void)
+{
+ unsigned long val;
+
+ asm volatile("mrs %0, mpidr_el1" : "=r" (val));
+
+ return val;
+}
+
+#define BSP_COREID 0
+
+void __asm_flush_dcache_all(void);
+void __asm_invalidate_dcache_all(void);
+void __asm_flush_dcache_range(u64 start, u64 end);
+void __asm_invalidate_tlb_all(void);
+void __asm_invalidate_icache_all(void);
+int __asm_flush_l3_cache(void);
+void __asm_switch_ttbr(u64 new_ttbr);
+
+void armv8_switch_to_el2(void);
+void armv8_switch_to_el1(void);
+void gic_init(void);
+void gic_send_sgi(unsigned long sgino);
+void wait_for_wakeup(void);
+void protect_secure_region(void);
+void smp_kick_all_cpus(void);
+
+void flush_l3_cache(void);
+
+/*
+ * Issue a hypervisor call in accordance with ARM "SMC Calling Convention",
+ * DEN0028A
+ *
+ * @args: input and output arguments
+ */
+void hvc_call(struct pt_regs *args);
+
+/*
+ * Issue a secure monitor call in accordance with ARM "SMC Calling Convention",
+ * DEN0028A
+ *
+ * @args: input and output arguments
+ */
+void smc_call(struct pt_regs *args);
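+
+/*
+ * Illustrative calling pattern only (assumes struct pt_regs exposes the
+ * general purpose registers as a regs[] array, as the ARM64 definition in
+ * asm/ptrace.h does; function_id and arg0 are hypothetical values).
+ * Per SMCCC, x0 carries the function identifier and also the first result:
+ *
+ *   struct pt_regs regs = { 0 };
+ *
+ *   regs.regs[0] = function_id;
+ *   regs.regs[1] = arg0;
+ *   smc_call(&regs);
+ *   result = regs.regs[0];
+ */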
+
+void __noreturn psci_system_reset(bool smc);
+
+#endif /* __ASSEMBLY__ */
+
+#else /* CONFIG_ARM64 */
+
#ifdef __KERNEL__
#define CPU_ARCH_UNKNOWN 0
#define CR_AFE (1 << 29) /* Access flag enable */
#define CR_TE (1 << 30) /* Thumb exception enable */
+#if defined(CONFIG_ARMV7_LPAE) && !defined(PGTABLE_SIZE)
+#define PGTABLE_SIZE (4096 * 5)
+#elif !defined(PGTABLE_SIZE)
+#define PGTABLE_SIZE (4096 * 4)
+#endif
+
/*
* This is used to ensure the compiler did actually allocate the register we
* asked it for some inline assembly sequences. Apparently we can't trust
#ifndef __ASSEMBLY__
+/**
+ * save_boot_params() - Save boot parameters before starting reset sequence
+ *
+ * If you provide this function it will be called as soon as U-Boot starts,
+ * both for SPL and U-Boot proper.
+ *
+ * All registers are unchanged from U-Boot entry. No registers need be
+ * preserved.
+ *
+ * This is not a normal C function. There is no stack. Return by branching to
+ * save_boot_params_ret.
+ *
+ * void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3);
+ */
+
+/**
+ * save_boot_params_ret() - Return from save_boot_params()
+ *
+ * If you provide save_boot_params(), then you should jump back to this
+ * function when done. Try to preserve all registers.
+ *
+ * If your implementation of save_boot_params() is in C then it is acceptable
+ * to simply call save_boot_params_ret() at the end of your function. Since
+ * there is no link register set up, you cannot just exit the function. U-Boot
+ * will return to the (uninitialised) value of lr, and likely crash/hang.
+ *
+ * If your implementation of save_boot_params() is in assembler then you
+ * should use 'b' or 'bx' to return to save_boot_params_ret.
+ */
+void save_boot_params_ret(void);
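+
+/*
+ * Illustrative assembler sketch only (boot_params_storage is a made-up
+ * symbol and r4 is clobbered as a scratch register):
+ *
+ *   ENTRY(save_boot_params)
+ *      ldr   r4, =boot_params_storage
+ *      stmia r4, {r0-r3}
+ *      b     save_boot_params_ret
+ *   ENDPROC(save_boot_params)
+ */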
+
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
#define wfi()
#endif
+static inline unsigned long get_cpsr(void)
+{
+ unsigned long cpsr;
+
+ asm volatile("mrs %0, cpsr" : "=r"(cpsr): );
+ return cpsr;
+}
+
+static inline int is_hyp(void)
+{
+#ifdef CONFIG_ARMV7_LPAE
+ /* HYP mode requires LPAE ... */
+ return ((get_cpsr() & 0x1f) == 0x1a);
+#else
+ /* ... so without LPAE support we can optimize all hyp code away */
+ return 0;
+#endif
+}
+
static inline unsigned int get_cr(void)
{
unsigned int val;
- asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
+
+ if (is_hyp())
+ asm volatile("mrc p15, 4, %0, c1, c0, 0 @ get CR" : "=r" (val)
+ :
+ : "cc");
+ else
+ asm volatile("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val)
+ :
+ : "cc");
return val;
}
static inline void set_cr(unsigned int val)
{
- asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
+ if (is_hyp())
+ asm volatile("mcr p15, 4, %0, c1, c0, 0 @ set CR" :
+ : "r" (val)
+ : "cc");
+ else
+ asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" :
+ : "r" (val)
+ : "cc");
+ isb();
+}
+
+static inline unsigned int get_dacr(void)
+{
+ unsigned int val;
+ asm("mrc p15, 0, %0, c3, c0, 0 @ get DACR" : "=r" (val) : : "cc");
+ return val;
+}
+
+static inline void set_dacr(unsigned int val)
+{
+ asm volatile("mcr p15, 0, %0, c3, c0, 0 @ set DACR"
: : "r" (val) : "cc");
isb();
}
+#ifdef CONFIG_ARMV7_LPAE
+/* Long-Descriptor Translation Table Level 1/2 Bits */
+#define TTB_SECT_XN_MASK (1ULL << 54)
+#define TTB_SECT_NG_MASK (1 << 11)
+#define TTB_SECT_AF (1 << 10)
+#define TTB_SECT_SH_MASK (3 << 8)
+#define TTB_SECT_NS_MASK (1 << 5)
+#define TTB_SECT_AP (1 << 6)
+/* Note: TTB AP bits are set elsewhere */
+#define TTB_SECT_MAIR(x) ((x & 0x7) << 2) /* Index into MAIR */
+#define TTB_SECT (1 << 0)
+#define TTB_PAGETABLE (3 << 0)
+
+/* TTBCR flags */
+#define TTBCR_EAE (1 << 31)
+#define TTBCR_T0SZ(x) ((x) << 0)
+#define TTBCR_T1SZ(x) ((x) << 16)
+#define TTBCR_USING_TTBR0 (TTBCR_T0SZ(0) | TTBCR_T1SZ(0))
+#define TTBCR_IRGN0_NC (0 << 8)
+#define TTBCR_IRGN0_WBWA (1 << 8)
+#define TTBCR_IRGN0_WT (2 << 8)
+#define TTBCR_IRGN0_WBNWA (3 << 8)
+#define TTBCR_IRGN0_MASK (3 << 8)
+#define TTBCR_ORGN0_NC (0 << 10)
+#define TTBCR_ORGN0_WBWA (1 << 10)
+#define TTBCR_ORGN0_WT (2 << 10)
+#define TTBCR_ORGN0_WBNWA (3 << 10)
+#define TTBCR_ORGN0_MASK (3 << 10)
+#define TTBCR_SHARED_NON (0 << 12)
+#define TTBCR_SHARED_OUTER (2 << 12)
+#define TTBCR_SHARED_INNER (3 << 12)
+#define TTBCR_EPD0 (0 << 7)
+
+/*
+ * Memory types
+ */
+#define MEMORY_ATTRIBUTES ((0x00 << (0 * 8)) | (0x88 << (1 * 8)) | \
+ (0xcc << (2 * 8)) | (0xff << (3 * 8)))
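+
+/*
+ * In MAIR terms the four attribute indices above are: 0 = 0x00 (device),
+ * 1 = 0x88 (write-through), 2 = 0xcc (write-back) and 3 = 0xff (write-back,
+ * write-allocate), matching the dcache_option values below.
+ */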
+
+/* options available for data cache on each page */
+enum dcache_option {
+ DCACHE_OFF = TTB_SECT | TTB_SECT_MAIR(0),
+ DCACHE_WRITETHROUGH = TTB_SECT | TTB_SECT_MAIR(1),
+ DCACHE_WRITEBACK = TTB_SECT | TTB_SECT_MAIR(2),
+ DCACHE_WRITEALLOC = TTB_SECT | TTB_SECT_MAIR(3),
+};
+#elif defined(CONFIG_CPU_V7)
+/* Short-Descriptor Translation Table Level 1 Bits */
+#define TTB_SECT_NS_MASK (1 << 19)
+#define TTB_SECT_NG_MASK (1 << 17)
+#define TTB_SECT_S_MASK (1 << 16)
+/* Note: TTB AP bits are set elsewhere */
+#define TTB_SECT_AP (3 << 10)
+#define TTB_SECT_TEX(x) ((x & 0x7) << 12)
+#define TTB_SECT_DOMAIN(x) ((x & 0xf) << 5)
+#define TTB_SECT_XN_MASK (1 << 4)
+#define TTB_SECT_C_MASK (1 << 3)
+#define TTB_SECT_B_MASK (1 << 2)
+#define TTB_SECT (2 << 0)
+
+/* options available for data cache on each page */
+enum dcache_option {
+ DCACHE_OFF = TTB_SECT_DOMAIN(0) | TTB_SECT_XN_MASK | TTB_SECT,
+ DCACHE_WRITETHROUGH = DCACHE_OFF | TTB_SECT_C_MASK,
+ DCACHE_WRITEBACK = DCACHE_WRITETHROUGH | TTB_SECT_B_MASK,
+ DCACHE_WRITEALLOC = DCACHE_WRITEBACK | TTB_SECT_TEX(1),
+};
+#else
+#define TTB_SECT_AP (3 << 10)
/* options available for data cache on each page */
enum dcache_option {
DCACHE_OFF = 0x12,
DCACHE_WRITETHROUGH = 0x1a,
DCACHE_WRITEBACK = 0x1e,
+ DCACHE_WRITEALLOC = 0x16,
};
+#endif
/* Size of an MMU section */
enum {
- MMU_SECTION_SHIFT = 20,
+#ifdef CONFIG_ARMV7_LPAE
+ MMU_SECTION_SHIFT = 21, /* 2MB */
+#else
+ MMU_SECTION_SHIFT = 20, /* 1MB */
+#endif
MMU_SECTION_SIZE = 1 << MMU_SECTION_SHIFT,
};
-/**
- * Change the cache settings for a region.
- *
- * \param start start address of memory region to change
- * \param size size of memory region to change
- * \param option dcache option to select
- */
-void mmu_set_region_dcache_behaviour(u32 start, int size,
- enum dcache_option option);
+#ifdef CONFIG_CPU_V7
+/* TTBR0 bits */
+#define TTBR0_BASE_ADDR_MASK 0xFFFFC000
+#define TTBR0_RGN_NC (0 << 3)
+#define TTBR0_RGN_WBWA (1 << 3)
+#define TTBR0_RGN_WT (2 << 3)
+#define TTBR0_RGN_WB (3 << 3)
+/* TTBR0[6] is IRGN[0] and TTBR0[0] is IRGN[1] */
+#define TTBR0_IRGN_NC (0 << 0 | 0 << 6)
+#define TTBR0_IRGN_WBWA (0 << 0 | 1 << 6)
+#define TTBR0_IRGN_WT (1 << 0 | 0 << 6)
+#define TTBR0_IRGN_WB (1 << 0 | 1 << 6)
+#endif
/**
* Register an update to the page tables, and flush the TLB
#endif /* __KERNEL__ */
+#endif /* CONFIG_ARM64 */
+
+#ifndef __ASSEMBLY__
+/**
+ * Change the cache settings for a region.
+ *
+ * \param start start address of memory region to change
+ * \param size size of memory region to change
+ * \param option dcache option to select
+ */
+void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
+ enum dcache_option option);
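+
+/*
+ * Illustrative sketch only (the address is made up): a board might mark a
+ * DMA buffer uncached, e.g.
+ *
+ *   mmu_set_region_dcache_behaviour(0x80000000, 2 * MMU_SECTION_SIZE,
+ *                                   DCACHE_OFF);
+ *
+ * The region is handled at MMU-section granularity, so start and size
+ * should be MMU_SECTION_SIZE aligned.
+ */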
+
+#ifdef CONFIG_SYS_NONCACHED_MEMORY
+void noncached_init(void);
+phys_addr_t noncached_alloc(size_t size, size_t align);
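+
+/*
+ * Illustrative usage only (the size is made up): after noncached_init()
+ * has set up the pool during boot, a driver can carve out an uncached,
+ * cache-line aligned buffer, e.g.
+ *
+ *   buf = noncached_alloc(4096, ARCH_DMA_MINALIGN);
+ */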
+#endif /* CONFIG_SYS_NONCACHED_MEMORY */
+
+#endif /* __ASSEMBLY__ */
+
#endif