X-Git-Url: https://git.librecmc.org/?a=blobdiff_plain;f=arch%2Fpowerpc%2Fcpu%2Fmpc85xx%2Fstart.S;h=5e0d78d0064f6ef120d21e9a9b2170cd97f9d1ec;hb=1fed668b3fb9c35932f58af00ff5539239fa4e1d;hp=7e5e6b17c019567826efe133d877b4a42586969f;hpb=d1e0b10accdbac2e0a8b2cbf7c589645442f87c5;p=oweals%2Fu-boot.git

diff --git a/arch/powerpc/cpu/mpc85xx/start.S b/arch/powerpc/cpu/mpc85xx/start.S
index 7e5e6b17c0..5e0d78d006 100644
--- a/arch/powerpc/cpu/mpc85xx/start.S
+++ b/arch/powerpc/cpu/mpc85xx/start.S
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004, 2007-2010 Freescale Semiconductor, Inc.
+ * Copyright 2004, 2007-2011 Freescale Semiconductor, Inc.
 * Copyright (C) 2003 Motorola,Inc.
 *
 * See file CREDITS for list of people who contributed to this
@@ -28,9 +28,9 @@
 *
 */
+#include <asm-offsets.h>
 #include <config.h>
 #include <mpc85xx.h>
-#include <timestamp.h>
 #include <version.h>
 #define _LINUX_CONFIG_H 1 /* avoid reading Linux autoconf.h file */
@@ -41,10 +41,6 @@
 #include <ppc_asm.tmpl>
 #include <ppc_defs.h>
-#ifndef CONFIG_IDENT_STRING
-#define CONFIG_IDENT_STRING ""
-#endif
-
 #undef MSR_KERNEL
 #define MSR_KERNEL ( MSR_ME ) /* Machine Check */
@@ -65,7 +61,7 @@
 #endif
 GOT_ENTRY(__init_end)
- GOT_ENTRY(_end)
+ GOT_ENTRY(__bss_end__)
 GOT_ENTRY(__bss_start)
 END_GOT
@@ -87,6 +83,45 @@ _start_e500:
+#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC)
+ /* ISBC uses L2 as stack.
+ * Disable L2 cache here so that u-boot can enable it later
+ * as part of its normal flow
+ */
+
+ /* Check if L2 is enabled */
+ mfspr r3, SPRN_L2CSR0
+ lis r2, L2CSR0_L2E@h
+ ori r2, r2, L2CSR0_L2E@l
+ and. r4, r3, r2
+ beq l2_disabled
+
+ mfspr r3, SPRN_L2CSR0
+ /* Flush L2 cache */
+ lis r2,(L2CSR0_L2FL)@h
+ ori r2, r2, (L2CSR0_L2FL)@l
+ or r3, r2, r3
+ sync
+ isync
+ mtspr SPRN_L2CSR0,r3
+ isync
+1:
+ mfspr r3, SPRN_L2CSR0
+ and. r1, r3, r2
+ bne 1b
+
+ mfspr r3, SPRN_L2CSR0
+ lis r2, L2CSR0_L2E@h
+ ori r2, r2, L2CSR0_L2E@l
+ andc r4, r3, r2
+ sync
+ isync
+ mtspr SPRN_L2CSR0,r4
+ isync
+
+l2_disabled:
+#endif
+
 /* clear registers/arrays not reset by hardware */
 /* L1 */
@@ -145,7 +180,7 @@ _start_e500:
 beq 2b
 /* Setup interrupt vectors */
- lis r1,CONFIG_SYS_TEXT_BASE@h
+ lis r1,CONFIG_SYS_MONITOR_BASE@h
 mtspr IVPR,r1
 li r1,0x0100
@@ -283,33 +318,281 @@ _start_e500:
 #endif /* CONFIG_MPC8569 */
+/*
+ * Relocate CCSR, if necessary. We relocate CCSR if (obviously) the default
+ * location is not where we want it. This typically happens on a 36-bit
+ * system, where we want to move CCSR to near the top of 36-bit address space.
+ *
+ * To move CCSR, we create two temporary TLBs, one for the old location, and
+ * another for the new location. On CoreNet systems, we also need to create
+ * a special, temporary LAW.
+ *
+ * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
+ * long-term TLBs, so we use TLB0 here.
+ */
+#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)
+
+#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
+#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW must be defined."
+#endif
+
+purge_old_ccsr_tlb:
+ lis r8, CONFIG_SYS_CCSRBAR@h
+ ori r8, r8, CONFIG_SYS_CCSRBAR@l
+ lis r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
+ ori r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
+
+ /*
+ * In a multi-stage boot (e.g. NAND boot), a previous stage may have
+ * created a TLB for CCSR, which will interfere with our relocation
+ * code. Since we're going to create a new TLB for CCSR anyway,
+ * it should be safe to delete this old TLB here. We have to search
+ * for it, though.
+ */ + + li r1, 0 + mtspr MAS6, r1 /* Search the current address space and PID */ + tlbsx 0, r8 + mfspr r1, MAS1 + andis. r2, r1, MAS1_VALID@h /* Check for the Valid bit */ + beq 1f /* Skip if no TLB found */ + + rlwinm r1, r1, 0, 1, 31 /* Clear Valid bit */ + mtspr MAS1, r1 + tlbwe +1: + +create_ccsr_new_tlb: + /* + * Create a TLB for the new location of CCSR. Register R8 is reserved + * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR). + */ + lis r0, FSL_BOOKE_MAS0(0, 0, 0)@h + ori r0, r0, FSL_BOOKE_MAS0(0, 0, 0)@l + lis r1, FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@h + ori r1, r1, FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@l + lis r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@h + ori r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@l + lis r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h + ori r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l + lis r7, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h + ori r7, r7, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l + mtspr MAS0, r0 + mtspr MAS1, r1 + mtspr MAS2, r2 + mtspr MAS3, r3 + mtspr MAS7, r7 + isync + msync + tlbwe + + /* + * Create a TLB for the old location of CCSR. Register R9 is reserved + * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000). + */ +create_ccsr_old_tlb: + lis r0, FSL_BOOKE_MAS0(0, 1, 0)@h + ori r0, r0, FSL_BOOKE_MAS0(0, 1, 0)@l + lis r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@h + ori r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@l + lis r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_DEFAULT, 0, (MAS3_SW|MAS3_SR))@h + ori r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_DEFAULT, 0, (MAS3_SW|MAS3_SR))@l + li r7, 0 /* The default CCSR address is always a 32-bit number */ + mtspr MAS0, r0 + /* MAS1 is the same as above */ + mtspr MAS2, r2 + mtspr MAS3, r3 + mtspr MAS7, r7 + isync + msync + tlbwe + +#ifdef CONFIG_FSL_CORENET + +#define CCSR_LAWBARH0 (CONFIG_SYS_CCSRBAR + 0x1000) +#define LAW_EN 0x80000000 +#define LAW_SIZE_4K 0xb +#define CCSRBAR_LAWAR (LAW_EN | (0x1e << 20) | LAW_SIZE_4K) +#define CCSRAR_C 0x80000000 /* Commit */ + +create_temp_law: + /* + * On CoreNet systems, we create the temporary LAW using a special LAW + * target ID of 0x1e. LAWBARH is at offset 0xc00 in CCSR. + */ + lis r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h + ori r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l + lis r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h + ori r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l + lis r2, CCSRBAR_LAWAR@h + ori r2, r2, CCSRBAR_LAWAR@l + + stw r0, 0xc00(r9) /* LAWBARH0 */ + stw r1, 0xc04(r9) /* LAWBARL0 */ + sync + stw r2, 0xc08(r9) /* LAWAR0 */ + + /* + * Read back from LAWAR to ensure the update is complete. e500mc + * cores also require an isync. + */ + lwz r0, 0xc08(r9) /* LAWAR0 */ + isync + + /* + * Read the current CCSRBARH and CCSRBARL using load word instructions. + * Follow this with an isync instruction. This forces any outstanding + * accesses to configuration space to completion. + */ +read_old_ccsrbar: + lwz r0, 0(r9) /* CCSRBARH */ + lwz r0, 4(r9) /* CCSRBARH */ + isync + + /* + * Write the new values for CCSRBARH and CCSRBARL to their old + * locations. The CCSRBARH has a shadow register. When the CCSRBARH + * has a new value written it loads a CCSRBARH shadow register. When + * the CCSRBARL is written, the CCSRBARH shadow register contents + * along with the CCSRBARL value are loaded into the CCSRBARH and + * CCSRBARL registers, respectively. Follow this with a sync + * instruction. 
+ */ +write_new_ccsrbar: + lis r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h + ori r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l + lis r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h + ori r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l + lis r2, CCSRAR_C@h + ori r2, r2, CCSRAR_C@l + + stw r0, 0(r9) /* Write to CCSRBARH */ + sync /* Make sure we write to CCSRBARH first */ + stw r1, 4(r9) /* Write to CCSRBARL */ + sync + + /* + * Write a 1 to the commit bit (C) of CCSRAR at the old location. + * Follow this with a sync instruction. + */ + stw r2, 8(r9) + sync + + /* Delete the temporary LAW */ +delete_temp_law: + li r1, 0 + stw r1, 0xc08(r8) + sync + stw r1, 0xc00(r8) + stw r1, 0xc04(r8) + sync + +#else /* #ifdef CONFIG_FSL_CORENET */ + +write_new_ccsrbar: + /* + * Read the current value of CCSRBAR using a load word instruction + * followed by an isync. This forces all accesses to configuration + * space to complete. + */ + sync + lwz r0, 0(r9) + isync + +/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */ +#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \ + (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12)) + + /* Write the new value to CCSRBAR. */ + lis r0, CCSRBAR_PHYS_RS12@h + ori r0, r0, CCSRBAR_PHYS_RS12@l + stw r0, 0(r9) + sync + + /* + * The manual says to perform a load of an address that does not + * access configuration space or the on-chip SRAM using an existing TLB, + * but that doesn't appear to be necessary. We will do the isync, + * though. + */ + isync + + /* + * Read the contents of CCSRBAR from its new location, followed by + * another isync. + */ + lwz r0, 0(r8) + isync + +#endif /* #ifdef CONFIG_FSL_CORENET */ + + /* Delete the temporary TLBs */ +delete_temp_tlbs: + lis r0, FSL_BOOKE_MAS0(0, 0, 0)@h + ori r0, r0, FSL_BOOKE_MAS0(0, 0, 0)@l + li r1, 0 + lis r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@h + ori r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@l + mtspr MAS0, r0 + mtspr MAS1, r1 + mtspr MAS2, r2 + isync + msync + tlbwe + + lis r0, FSL_BOOKE_MAS0(0, 1, 0)@h + ori r0, r0, FSL_BOOKE_MAS0(0, 1, 0)@l + lis r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@h + ori r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@l + mtspr MAS0, r0 + mtspr MAS2, r2 + isync + msync + tlbwe +#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */ + +create_init_ram_area: lis r6,FSL_BOOKE_MAS0(1, 15, 0)@h ori r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l -#ifndef CONFIG_SYS_RAMBOOT +#if !defined(CONFIG_SYS_RAMBOOT) && !defined(CONFIG_SECURE_BOOT) /* create a temp mapping in AS=1 to the 4M boot window */ lis r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_4M)@h ori r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_4M)@l - lis r8,FSL_BOOKE_MAS2(CONFIG_SYS_TEXT_BASE & 0xffc00000, (MAS2_I|MAS2_G))@h - ori r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_TEXT_BASE & 0xffc00000, (MAS2_I|MAS2_G))@l + lis r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000, (MAS2_I|MAS2_G))@h + ori r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000, (MAS2_I|MAS2_G))@l /* The 85xx has the default boot window 0xff800000 - 0xffffffff */ lis r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h ori r9,r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l +#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT) + /* create a temp mapping in AS = 1 for Flash mapping + * created by PBL for ISBC code + */ + lis r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@h + ori r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@l + + lis r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@h + 
ori r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@l + + lis r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0, + (MAS3_SX|MAS3_SW|MAS3_SR))@h + ori r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0, + (MAS3_SX|MAS3_SW|MAS3_SR))@l #else /* - * create a temp mapping in AS=1 to the 1M CONFIG_SYS_TEXT_BASE space, the main - * image has been relocated to CONFIG_SYS_TEXT_BASE on the second stage. + * create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE space, the main + * image has been relocated to CONFIG_SYS_MONITOR_BASE on the second stage. */ lis r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@h ori r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@l - lis r8,FSL_BOOKE_MAS2(CONFIG_SYS_TEXT_BASE, (MAS2_I|MAS2_G))@h - ori r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_TEXT_BASE, (MAS2_I|MAS2_G))@l + lis r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@h + ori r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@l - lis r9,FSL_BOOKE_MAS3(CONFIG_SYS_TEXT_BASE, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h - ori r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_TEXT_BASE, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l + lis r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h + ori r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l #endif mtspr MAS0,r6 @@ -398,9 +681,7 @@ _start: .long 0x27051956 /* U-BOOT Magic Number */ .globl version_string version_string: - .ascii U_BOOT_VERSION - .ascii " (", U_BOOT_DATE, " - ", U_BOOT_TIME, ")" - .ascii CONFIG_IDENT_STRING, "\0" + .ascii U_BOOT_VERSION_STRING, "\0" .align 4 .globl _start_cont @@ -682,6 +963,8 @@ mck_return: /* Cache functions. */ +.globl flush_icache +flush_icache: .globl invalidate_icache invalidate_icache: mfspr r0,L1CSR1 @@ -752,7 +1035,7 @@ dcache_disable: lis r4,0 ori r4,r4,L1CSR0_DCE andc r3,r3,r4 - mtspr L1CSR0,r0 + mtspr L1CSR0,r3 isync blr @@ -1039,7 +1322,7 @@ in_ram: lwzux r0,r4,r11 cmpwi r0,0 add r0,r0,r11 - stw r10,0(r3) + stw r4,0(r3) beq- 5f stw r0,0(r4) 5: bdnz 3b @@ -1049,7 +1332,7 @@ clear_bss: * Now clear BSS segment */ lwz r3,GOT(__bss_start) - lwz r4,GOT(_end) + lwz r4,GOT(__bss_end__) cmplw 0,r3,r4 beq 6f
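
For reference, the non-CoreNet path in this patch programs CCSRBAR with the 36-bit
physical address of the new CCSR window right-shifted by 12 bits, which is exactly
what the CCSRBAR_PHYS_RS12 macro builds from CONFIG_SYS_CCSRBAR_PHYS_HIGH and
CONFIG_SYS_CCSRBAR_PHYS_LOW. The C sketch below reproduces that arithmetic for a
hypothetical 36-bit CCSR base of 0xf_fe000000; the HIGH/LOW values are illustrative
assumptions, not taken from any board header in this patch.

/* Minimal sketch of the CCSRBAR_PHYS_RS12 computation (illustrative values). */
#include <stdint.h>
#include <stdio.h>

#define CCSRBAR_PHYS_HIGH 0xfu        /* assumed bits 32-35 of the 36-bit address */
#define CCSRBAR_PHYS_LOW  0xfe000000u /* assumed bits 0-31 of the 36-bit address */

int main(void)
{
	/* CCSRBAR holds the physical base right-shifted by 12 bits:
	 * HIGH lands in bits 20-23 of the register value, LOW >> 12 in bits 0-19.
	 */
	uint32_t ccsrbar = (CCSRBAR_PHYS_HIGH << 20) | (CCSRBAR_PHYS_LOW >> 12);

	printf("CCSRBAR = 0x%08x\n", ccsrbar); /* prints 0x00ffe000 */
	return 0;
}

Writing this value to the old CCSRBAR location, followed by a sync and a readback
with isync, is the sequence the write_new_ccsrbar code above performs.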