diff --git a/arch/x86/cpu/i386/cpu.c b/arch/x86/cpu/i386/cpu.c
index 09a5b919e077d761f431808a57d9120b7934276d..435e50edada7d368613c00a92442bb36a63c534b 100644
--- a/arch/x86/cpu/i386/cpu.c
+++ b/arch/x86/cpu/i386/cpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * (C) Copyright 2008-2011
  * Graeme Russ, <graeme.russ@gmail.com>
  *
  * Part of this file is adapted from coreboot
  * src/arch/x86/lib/cpu.c
- *
- * SPDX-License-Identifier:    GPL-2.0+
  */
 
 #include <common.h>
+#include <cpu_func.h>
+#include <init.h>
 #include <malloc.h>
+#include <spl.h>
 #include <asm/control_regs.h>
+#include <asm/coreboot_tables.h>
 #include <asm/cpu.h>
 #include <asm/mp.h>
 #include <asm/msr.h>
@@ -58,6 +61,8 @@ struct cpuinfo_x86 {
        uint8_t x86_mask;
 };
 
+/* gcc 7.3 does not want to drop x86_vendors, so use #ifdef */
+#ifndef CONFIG_TPL_BUILD
 /*
  * List of cpu vendor strings along with their normalized
  * id values.
@@ -78,6 +83,7 @@ static const struct {
        { X86_VENDOR_NSC,       "Geode by NSC", },
        { X86_VENDOR_SIS,       "SiS SiS SiS ", },
 };
+#endif
 
 static void load_ds(u32 segment)
 {
@@ -131,10 +137,14 @@ void arch_setup_gd(gd_t *new_gd)
        /* DS: data, read/write, 4 GB, base 0 */
        gdt_addr[X86_GDT_ENTRY_32BIT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff);
 
-       /* FS: data, read/write, 4 GB, base (Global Data Pointer) */
+       /*
+        * FS: data, read/write, sizeof (Global Data Pointer),
+        * base (Global Data Pointer)
+        */
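+       /*
+        * U-Boot's x86 global data accessor reads the pointer through %fs:0,
+        * which is why the segment base is the address of gd_addr rather
+        * than new_gd itself.
+        */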
        new_gd->arch.gd_addr = new_gd;
-       gdt_addr[X86_GDT_ENTRY_32BIT_FS] = GDT_ENTRY(0xc093,
-                    (ulong)&new_gd->arch.gd_addr, 0xfffff);
+       gdt_addr[X86_GDT_ENTRY_32BIT_FS] = GDT_ENTRY(0x8093,
+                                       (ulong)&new_gd->arch.gd_addr,
+                                       sizeof(new_gd->arch.gd_addr) - 1);
 
        /* 16-bit CS: code, read/execute, 64 kB, base 0 */
        gdt_addr[X86_GDT_ENTRY_16BIT_CS] = GDT_ENTRY(0x009b, 0, 0x0ffff);
@@ -199,6 +209,7 @@ static inline int test_cyrix_52div(void)
        return (unsigned char) (test >> 8) == 0x02;
 }
 
+#ifndef CONFIG_TPL_BUILD
 /*
  *     Detect a NexGen CPU running without BIOS hypercode new enough
  *     to have CPUID. (Thanks to Herbert Oppmann)
@@ -219,6 +230,7 @@ static int deep_magic_nexgen_probe(void)
                : "=a" (ret) : : "cx", "dx");
        return  ret;
 }
+#endif
 
 static bool has_cpuid(void)
 {
@@ -230,6 +242,7 @@ static bool has_mtrr(void)
        return cpuid_edx(0x00000001) & (1 << 12) ? true : false;
 }
 
+#ifndef CONFIG_TPL_BUILD
 static int build_vendor_name(char *vendor_name)
 {
        struct cpuid_result result;
@@ -242,14 +255,40 @@ static int build_vendor_name(char *vendor_name)
 
        return result.eax;
 }
+#endif
 
 static void identify_cpu(struct cpu_device_id *cpu)
 {
+       cpu->device = 0; /* fix gcc 4.4.4 warning */
+
+       /*
+        * Do a quick and dirty check to save space - Intel and AMD only and
+        * just the vendor. This is enough for most TPL code.
+        */
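+       /*
+        * CPUID leaf 0 returns the vendor string split across EBX, EDX and
+        * ECX ("Genu"/"ineI"/"ntel", "Auth"/"enti"/"cAMD"), so the top byte
+        * of ECX is enough to tell the two apart: 'l' from "ntel" and 'D'
+        * from "cAMD".
+        */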
+       if (spl_phase() == PHASE_TPL) {
+               struct cpuid_result result;
+
+               result = cpuid(0x00000000);
+               switch (result.ecx >> 24) {
+               case 'l': /* GenuineIntel */
+                       cpu->vendor = X86_VENDOR_INTEL;
+                       break;
+               case 'D': /* AuthenticAMD */
+                       cpu->vendor = X86_VENDOR_AMD;
+                       break;
+               default:
+                       cpu->vendor = X86_VENDOR_ANY;
+                       break;
+               }
+               return;
+       }
+
+/* gcc 7.3 does not want to drop x86_vendors, so use #ifdef */
+#ifndef CONFIG_TPL_BUILD
        char vendor_name[16];
        int i;
 
        vendor_name[0] = '\0'; /* Unset */
-       cpu->device = 0; /* fix gcc 4.4.4 warning */
 
        /* Find the id and vendor_name */
        if (!has_cpuid()) {
@@ -265,9 +304,8 @@ static void identify_cpu(struct cpu_device_id *cpu)
                /* Detect NexGen with old hypercode */
                else if (deep_magic_nexgen_probe())
                        memcpy(vendor_name, "NexGenDriven", 13);
-       }
-       if (has_cpuid()) {
-               int  cpuid_level;
+       } else {
+               int cpuid_level;
 
                cpuid_level = build_vendor_name(vendor_name);
                vendor_name[12] = '\0';
@@ -287,6 +325,7 @@ static void identify_cpu(struct cpu_device_id *cpu)
                        break;
                }
        }
+#endif
 }
 
 static inline void get_fms(struct cpuinfo_x86 *c, uint32_t tfms)
@@ -310,21 +349,22 @@ u32 cpu_get_stepping(void)
        return gd->arch.x86_mask;
 }
 
-int x86_cpu_init_f(void)
+/* initialise FPU, reset EM, set MP and NE */
+static void setup_cpu_features(void)
 {
        const u32 em_rst = ~X86_CR0_EM;
        const u32 mp_ne_set = X86_CR0_MP | X86_CR0_NE;
 
-       if (ll_boot_init()) {
-               /* initialize FPU, reset EM, set MP and NE */
-               asm ("fninit\n" \
-               "movl %%cr0, %%eax\n" \
-               "andl %0, %%eax\n" \
-               "orl  %1, %%eax\n" \
-               "movl %%eax, %%cr0\n" \
-               : : "i" (em_rst), "i" (mp_ne_set) : "eax");
-       }
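+       /*
+        * In CR0, clearing EM (bit 2) lets x87 instructions execute on the
+        * FPU instead of trapping, while setting MP (bit 1) and NE (bit 5)
+        * makes WAIT/FWAIT honour TS and routes FPU errors to the native
+        * #MF exception.
+        */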
+       asm ("fninit\n" \
+       "movl %%cr0, %%eax\n" \
+       "andl %0, %%eax\n" \
+       "orl  %1, %%eax\n" \
+       "movl %%eax, %%cr0\n" \
+       : : "i" (em_rst), "i" (mp_ne_set) : "eax");
+}
 
+static void setup_identity(void)
+{
        /* identify CPU via cpuid and store the decoded info into gd->arch */
        if (has_cpuid()) {
                struct cpu_device_id cpu;
@@ -340,46 +380,80 @@ int x86_cpu_init_f(void)
 
                gd->arch.has_mtrr = has_mtrr();
        }
-       /* Don't allow PCI region 3 to use memory in the 2-4GB memory hole */
+}
+
+/* Don't allow PCI region 3 to use memory in the 2-4GB memory hole */
+static void setup_pci_ram_top(void)
+{
        gd->pci_ram_top = 0x80000000U;
+}
+
+static void setup_mtrr(void)
+{
+       u64 mtrr_cap;
 
        /* Configure fixed range MTRRs for some legacy regions */
-       if (gd->arch.has_mtrr) {
-               u64 mtrr_cap;
-
-               mtrr_cap = native_read_msr(MTRR_CAP_MSR);
-               if (mtrr_cap & MTRR_CAP_FIX) {
-                       /* Mark the VGA RAM area as uncacheable */
-                       native_write_msr(MTRR_FIX_16K_A0000_MSR,
-                                        MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE),
-                                        MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE));
-
-                       /*
-                        * Mark the PCI ROM area as cacheable to improve ROM
-                        * execution performance.
-                        */
-                       native_write_msr(MTRR_FIX_4K_C0000_MSR,
-                                        MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
-                                        MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
-                       native_write_msr(MTRR_FIX_4K_C8000_MSR,
-                                        MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
-                                        MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
-                       native_write_msr(MTRR_FIX_4K_D0000_MSR,
-                                        MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
-                                        MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
-                       native_write_msr(MTRR_FIX_4K_D8000_MSR,
-                                        MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
-                                        MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
-
-                       /* Enable the fixed range MTRRs */
-                       msr_setbits_64(MTRR_DEF_TYPE_MSR, MTRR_DEF_TYPE_FIX_EN);
-               }
+       if (!gd->arch.has_mtrr)
+               return;
+
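+       /*
+        * The FIX bit in the MTRR capability MSR indicates that the
+        * fixed-range MTRRs covering the legacy area below 1 MiB are
+        * implemented.
+        */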
+       mtrr_cap = native_read_msr(MTRR_CAP_MSR);
+       if (mtrr_cap & MTRR_CAP_FIX) {
+               /* Mark the VGA RAM area as uncacheable */
+               native_write_msr(MTRR_FIX_16K_A0000_MSR,
+                                MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE),
+                                MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE));
+
+               /*
+                * Mark the PCI ROM area as cacheable to improve ROM
+                * execution performance.
+                */
+               native_write_msr(MTRR_FIX_4K_C0000_MSR,
+                                MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
+                                MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
+               native_write_msr(MTRR_FIX_4K_C8000_MSR,
+                                MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
+                                MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
+               native_write_msr(MTRR_FIX_4K_D0000_MSR,
+                                MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
+                                MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
+               native_write_msr(MTRR_FIX_4K_D8000_MSR,
+                                MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
+                                MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
+
+               /* Enable the fixed range MTRRs */
+               msr_setbits_64(MTRR_DEF_TYPE_MSR, MTRR_DEF_TYPE_FIX_EN);
        }
+}
+
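+/* TPL keeps this minimal: just the CR0/FPU setup and the quick vendor check */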
+int x86_cpu_init_tpl(void)
+{
+       setup_cpu_features();
+       setup_identity();
+
+       return 0;
+}
+
+int x86_cpu_init_f(void)
+{
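+       /*
+        * Only touch CR0/the FPU when doing the low-level init ourselves;
+        * ll_boot_init() is false when a previous stage has already done it
+        */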
+       if (ll_boot_init())
+               setup_cpu_features();
+       setup_identity();
+       setup_mtrr();
+       setup_pci_ram_top();
 
-#ifdef CONFIG_I8254_TIMER
        /* Set up the i8254 timer if required */
-       i8254_init();
-#endif
+       if (IS_ENABLED(CONFIG_I8254_TIMER))
+               i8254_init();
+
+       return 0;
+}
+
+int x86_cpu_reinit_f(void)
+{
+       setup_identity();
+       setup_pci_ram_top();
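+       /*
+        * If a coreboot table is found, earlier firmware has already done
+        * the low-level init, so mark it to be skipped
+        */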
+       if (locate_coreboot_table() >= 0)
+               gd->flags |= GD_FLG_SKIP_LL_INIT;
 
        return 0;
 }
@@ -463,6 +537,7 @@ int cpu_has_64bit(void)
                has_long_mode();
 }
 
+#define PAGETABLE_BASE         0x80000
 #define PAGETABLE_SIZE         (6 * 4096)
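+/*
+ * Base address for the page tables used by the jump to 64-bit U-Boot;
+ * 0x80000 is assumed to be a free area of low memory at this point in SPL.
+ */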
 
 /**
@@ -503,6 +578,48 @@ int cpu_jump_to_64bit(ulong setup_base, ulong target)
        return -EFAULT;
 }
 
+/*
+ * Jump from SPL to U-Boot
+ *
+ * This function is a work in progress, with many issues to resolve.
+ *
+ * It works by setting up several regions:
+ *   ptr      - a place to put the code that jumps into 64-bit mode
+ *   gdt      - a place to put the global descriptor table
+ *   pgtable  - a place to put the page tables
+ *
+ * The cpu_call64() code is copied from ROM and then manually patched so that
+ * it has the correct GDT address in RAM. U-Boot is copied from ROM into
+ * its pre-relocation address. Then we jump to the cpu_call64() code in RAM,
+ * which changes to 64-bit mode and starts U-Boot.
+ */
+int cpu_jump_to_64bit_uboot(ulong target)
+{
+       typedef void (*func_t)(ulong pgtable, ulong setup_base, ulong target);
+       uint32_t *pgtable;
+       func_t func;
+       char *ptr;
+
+       pgtable = (uint32_t *)PAGETABLE_BASE;
+
+       build_pagetable(pgtable);
+
+       extern long call64_stub_size;
+       ptr = malloc(call64_stub_size);
+       if (!ptr) {
+               printf("Failed to allocate the cpu_call64 stub\n");
+               return -ENOMEM;
+       }
+       memcpy(ptr, cpu_call64, call64_stub_size);
+
+       func = (func_t)ptr;
+
+       /* Jump to U-Boot */
+       func((ulong)pgtable, 0, (ulong)target);
+
+       return -EFAULT;
+}
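+/*
+ * cpu_call64() and call64_stub_size are presumed to be provided by the
+ * 32-bit-to-64-bit trampoline (call64.S); the stub is copied into malloc()ed
+ * RAM above so that, as described in the comment before this function, it
+ * can be run from RAM once U-Boot has been placed at its pre-relocation
+ * address.
+ */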
+
 #ifdef CONFIG_SMP
 static int enable_smis(struct udevice *cpu, void *unused)
 {