 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Enhanced CPU detection and feature setting code by Mike Jagdis
 * and Martin Mares, November 1997.
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/cpufeature.h>
#include <asm/percpu.h>
/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)
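/*
 * Example (assuming the common 3G/1G split, __PAGE_OFFSET = 0xC0000000):
 * pa(0xC0100000) = 0x00100000, i.e. a kernel virtual address is converted
 * to the physical address we must use while paging is still disabled.
 */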
 * References to members of the new_cpu_data structure.
 */
#define X86		new_cpu_data+CPUINFO_x86
#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 *
 *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
 *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end)
 * and smaller than max_low_pfn; otherwise we will waste some page table entries.
 */
#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif
/*
 * Number of possible pages in the lowmem region.
 *
 * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a
 * gas warning about overflowing shift count when gas has been compiled
 * with support for only a host target that uses a 32-bit type for its
 * internal representation.
 */
LOWMEM_PAGES = (((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT)
/* Enough space to fit pagetables for the low memory linear map */
MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
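/*
 * Illustrative worked example only, assuming __PAGE_OFFSET = 0xC0000000
 * (about 1 GiB of lowmem) and the usual PTRS_PER_* values:
 *   LOWMEM_PAGES = (0x100000000 - 0xC0000000) >> 12 = 0x40000 pages
 *   non-PAE: PAGE_TABLE_SIZE = 0x40000/1024     = 256 pages -> 1 MiB
 *   PAE:     PAGE_TABLE_SIZE = 0x40000/512 + 4  = 516 pages -> 2 MiB + 16 KiB
 */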
/*
 * Worst-case size of the kernel mapping we need to make:
 * a relocatable kernel can live anywhere in lowmem, so we need to be able
 * to map all of lowmem.
 */
KERNEL_PAGES = LOWMEM_PAGES

INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
RESERVE_BRK(pagetables, INIT_MAP_SIZE)
/*
 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
 * %esi points to the real-mode code as a 32-bit pointer.
 * CS and DS must be 4 GB flat segments, but we don't depend on
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
	movl pa(stack_start),%ecx

	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
	   us to not reload segments */
	testb $(1<<6), BP_loadflags(%esi)
/*
 * Set segments to known values.
 */
	lgdt pa(boot_gdt_descr)
	movl $(__BOOT_DS),%eax

	leal -__PAGE_OFFSET(%ecx),%esp
/*
 * Clear BSS first so that there are no surprises...
 */
	movl $pa(__bss_start),%edi
	movl $pa(__bss_stop),%ecx
 * Copy bootup parameters out of the way.
 * Note: %esi still has the pointer to the real-mode data.
 * When kexec is used as the boot loader, the parameter segment might be
 * loaded beyond the kernel image and might not even be addressable by the
 * early boot page tables (the kexec-on-panic case). Hence, copy out the
 * parameters before initializing the page tables.
 */
	movl $pa(boot_params),%edi
	movl $(PARAM_SIZE/4),%ecx

	movl pa(boot_params) + NEW_CL_POINTER,%esi
	andl %esi,%esi			# is there a command line pointer?
	jz 1f				# No command line
	movl $pa(boot_command_line),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	/* save OFW's pgdir table for later use when calling into OFW */
	movl %cr3, %eax
	movl %eax, pa(olpc_ofw_pgd)

#ifdef CONFIG_MICROCODE_EARLY
	/* Early load ucode on BSP. */
 * Initialize page tables. This creates a PDE and a set of page
 * tables, which are located immediately beyond __brk_base. The variable
 * _brk_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end.
 */
#ifdef CONFIG_X86_PAE

	/*
	 * In PAE mode initial_page_table is statically defined to contain
	 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
	 * entries). The identity mapping is handled by pointing two PGD entries
	 * to the first kernel PMD.
	 *
	 * Note the upper half of each PMD or PTE is always zero at this stage.
	 */

#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
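/*
 * For example, with __PAGE_OFFSET = 0xC0000000, (-__PAGE_OFFSET) >> 30 = 1,
 * so there is one kernel PMD; a 2G/2G split yields 2 and a 1G/3G split
 * yields 3, matching the VMSPLIT options mentioned above.
 */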
	xorl %ebx,%ebx				/* %ebx is kept at zero */

	movl $pa(__brk_base), %edi
	movl $pa(initial_pg_pmd), %edx
	movl $PTE_IDENT_ATTR, %eax
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
	movl %ecx,(%edx)			/* Store PMD entry */
						/* Upper half already zero */

	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp

	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
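	/*
	 * initial_pg_pmd + 0x1000*KPMDS - 8 is the last 8-byte entry of the
	 * last kernel PMD, i.e. the slot covering the top 2 MB of the
	 * address space, where the early fixmap page table is hooked in.
	 */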
page_pde_offset = (__PAGE_OFFSET >> 20);
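/*
 * page_pde_offset is the byte offset of the first kernel PDE within the
 * page directory, e.g. 0xC0000000 >> 20 = 0xC00 = 3072, which is entry
 * 768 (0xC0000000 >> 22) times 4 bytes per PDE.
 */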
	movl $pa(__brk_base), %edi
	movl $pa(initial_page_table), %edx
	movl $PTE_IDENT_ATTR, %eax
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
	movl %ecx,(%edx)			/* Store identity PDE entry */
	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */

	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp

	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_page_table+0xffc)
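	/*
	 * Offset 0xffc is PDE 1023 (0xffc / 4), the last entry of the page
	 * directory, covering the top 4 MB of the virtual address space
	 * where the early fixmap page table is hooked in.
	 */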
#ifdef CONFIG_PARAVIRT
	/* This can only trip for a broken bootloader... */
	cmpw $0x207, pa(boot_params + BP_version)

	/* Paravirt-compatible boot parameters. Look to see what architecture
	   we're booting under. */
	movl pa(boot_params + BP_hardware_subarch), %eax
	cmpl $num_subarch_entries, %eax

	movl pa(subarch_entries)(,%eax,4), %eax
	subl $__PAGE_OFFSET, %eax

	/* Unknown implementation; there's really
	   nothing we can do at this point. */

subarch_entries:
	.long default_entry		/* normal x86/PC */
	.long lguest_entry		/* lguest hypervisor */
	.long xen_entry			/* Xen hypervisor */
	.long default_entry		/* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4

#endif /* CONFIG_PARAVIRT */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except the stack. We just set up the stack here, then call
 * startup_32_smp().
 */
	movl stack_start, %ecx

/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt for us.
 *
 * If CPU hotplug is not supported, this code can go in the init section,
 * which will be freed later.
 */
ENTRY(startup_32_smp)
	movl $(__BOOT_DS),%eax

	movl pa(stack_start),%ecx

	leal -__PAGE_OFFSET(%ecx),%esp

#ifdef CONFIG_MICROCODE_EARLY
	/* Early load ucode on AP. */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)

	movl $(CR0_STATE & ~X86_CR0_PG),%eax
 * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave
 * bits like NT set. This would confuse the debugger if this code is traced. So
 * initialize them properly now, before we enable paging. That means
 * DF in particular (even though we have cleared it earlier after copying the
 * command line) because GCC expects it.
 */

/*
 * New page tables may be in 4 Mbyte page mode and may be using global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists
 * if and only if CPUID exists and has flags other than the FPU flag set.
 */
	movl $-1,pa(X86_CPUID)		# preset CPUID level
	movl $X86_EFLAGS_ID,%ecx
	pushl %ecx			# push the EFLAGS.ID value
	popfl				# set EFLAGS=ID
	pushfl				# read EFLAGS back
	popl %eax			# get EFLAGS
	testl $X86_EFLAGS_ID,%eax	# did EFLAGS.ID remain set?
	jz enable_paging		# hw disallowed setting of ID bit
					# which means no CPUID and no CR4

	movl %eax,pa(X86_CPUID)		# save largest std CPUID function

	andl $~1,%edx			# Ignore CPUID.FPU
	jz enable_paging		# No flags or only CPUID.FPU = no CR4

	movl pa(mmu_cr4_features),%eax
	movl %eax,%cr4

	testb $X86_CR4_PAE, %al		# check if PAE is enabled
	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
	cpuid
	/* Value must be in the range 0x80000001 to 0x8000ffff */
	subl $0x80000001, %eax
	cmpl $(0x8000ffff-0x80000001), %eax
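	/*
	 * This is the usual unsigned range-check idiom: after subtracting the
	 * lower bound, any value below it wraps to a huge unsigned number, so
	 * a single unsigned compare against (upper - lower) rejects values
	 * outside the range in either direction.
	 */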
	/* Clear bogus XD_DISABLE bits */
	call verify_cpu

	mov $0x80000001, %eax
	cpuid
	/* Execute Disable bit supported? */
	btl $(X86_FEATURE_NX & 31), %edx

	/* Setup EFER (Extended Feature Enable Register) */

	/* Make changes effective */
	movl $pa(initial_page_table), %eax
	movl %eax,%cr3		/* set the page table pointer.. */
	movl $CR0_STATE,%eax
	movl %eax,%cr0		/* ..and set paging (PG) bit */
	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
1:
	/* Shift the stack pointer to a virtual address */
	addl $__PAGE_OFFSET, %esp
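	/*
	 * The stack pointer was loaded earlier as a physical address
	 * (leal -__PAGE_OFFSET(%ecx),%esp); now that paging is enabled,
	 * adding __PAGE_OFFSET back turns it into the kernel virtual address.
	 */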
 * start system 32-bit setup. We need to re-do some of the things done
 * in 16-bit mode for the "real" operations.
 */
	movl setup_once_ref,%eax
	andl %eax,%eax
	jz 1f				# Did we do this already?
	call *%eax
1:
	movb $4,X86			# at least 486

	/* get vendor info */
	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
	cpuid
	movl %eax,X86_CPUID		# save CPUID level
	movl %ebx,X86_VENDOR_ID		# lo 4 chars
	movl %edx,X86_VENDOR_ID+4	# next 4 chars
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax			# do we have processor info as well?

	movl $1,%eax			# Use the CPUID instruction to get CPU type
	cpuid
	movb %al,%cl			# save reg for future use
	andb $0x0f,%ah			# mask processor family
	andb $0xf0,%al			# mask model
	andb $0x0f,%cl			# mask stepping ("mask" revision)
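	/*
	 * CPUID leaf 1 returns the CPU signature in %eax: bits 3:0 are the
	 * stepping ("mask"), bits 7:4 the model and bits 11:8 the family,
	 * which is what the masking above picks apart.
	 */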
	movl %edx,X86_CAPABILITY

	movl $0x50022,%ecx		# set AM, WP, NE and MP
	movl %cr0,%eax
	andl $0x80000011,%eax		# Save PG,PE,ET
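	/*
	 * For reference: 0x50022 = AM (bit 18) | WP (bit 16) | NE (bit 5) |
	 * MP (bit 1), and 0x80000011 = PG (bit 31) | ET (bit 4) | PE (bit 0),
	 * so only the bits named in the comments above are kept or set.
	 */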
	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.

	movl $(__USER_DS),%eax		# DS/ES contains default USER segment

	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs			# set this cpu's percpu

	movl $(__KERNEL_STACK_CANARY),%eax

	xorl %eax,%eax			# Clear LDT

	pushl $0			# fake return address for unwinder

#include "verify_cpu.S"
 * The setup work we only want to run on the BSP.
 *
 * Warning: %esi is live across this function.
 */
	/*
	 * Set up an idt with 256 interrupt gates that push zero if there
	 * is no error code and then jump to early_idt_handler_common.
	 * It doesn't actually load the idt - that needs to be done on
	 * each CPU. Interrupts are enabled elsewhere, when we can be
	 * relatively sure everything is ok.
	 */

	movl $early_idt_handler_array,%eax
	movl $NUM_EXCEPTION_VECTORS,%ecx

	/* interrupt gate, dpl=0, present */
	movl $(0x8E000000 + __KERNEL_CS),2(%edi)
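	/*
	 * An IDT gate keeps the handler offset in bytes 0-1 and 6-7, the
	 * segment selector in bytes 2-3 and the attributes in bytes 4-5.
	 * Writing 0x8E000000 + __KERNEL_CS at offset 2 therefore fills the
	 * selector and sets the attribute word to 0x8E00: present, DPL 0,
	 * 32-bit interrupt gate.
	 */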
	addl $EARLY_IDT_HANDLER_SIZE,%eax

	movl $256 - NUM_EXCEPTION_VECTORS,%ecx
	movl $ignore_int,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax			/* selector = 0x0010 = cs */
	movw $0x8E00,%dx		/* interrupt gate - dpl=0, present */
#ifdef CONFIG_CC_STACKPROTECTOR
	/*
	 * Configure the stack canary. The linker can't handle this by
	 * relocation. Manually set base address in stack canary
	 * segment descriptor.
	 */
	movl $gdt_page,%eax
	movl $stack_canary,%ecx
	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
	shrl $16, %ecx
	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
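	/*
	 * A GDT descriptor scatters its 32-bit base across bytes 2-3
	 * (bits 15:0), byte 4 (bits 23:16) and byte 7 (bits 31:24), hence
	 * the three separate stores above.
	 */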
	andl $0,setup_once_ref		/* Once is enough, thanks */
ENTRY(early_idt_handler_array)
	# 24(%esp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushl $0		# Dummy error code, to make stack frame uniform
	.endif
	pushl $i		# 20(%esp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)
early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cmpl $2,(%esp)			# X86_TRAP_NMI
	je is_nmi			# Ignore NMI

	cmpl $2,%ss:early_recursion_flag
	incl %ss:early_recursion_flag

	movl $(__KERNEL_DS),%eax

	cmpl $(__KERNEL_CS),32(%esp)

	leal 28(%esp),%eax		# Pointer to %eip
	call early_fixup_exception
	andl %eax,%eax
	jnz ex_entry			/* found an exception entry */

	movw %ax,2(%esp)		/* clean up the segment values on some cpus */

	pushl %eax			/* %esp before the exception */

	pushl (20+6*4)(%esp)		/* trapno */

	decl %ss:early_recursion_flag

	addl $8,%esp			/* drop vector number and error code */
ENDPROC(early_idt_handler_common)
/* This is the default interrupt "handler" :-) */
	movl $(__KERNEL_DS),%eax

	cmpl $2,early_recursion_flag
	incl early_recursion_flag

early_recursion_flag:
	.long 0

ENTRY(initial_code)
	.long i386_start_kernel
ENTRY(setup_once_ref)
	.long setup_once
#ifdef CONFIG_X86_PAE

#else
ENTRY(initial_page_table)
	.fill 1024,4,0
#endif

ENTRY(empty_zero_page)

ENTRY(swapper_pg_dir)
 * This starts the data section.
 */
#ifdef CONFIG_X86_PAE
	/* Page-aligned for the benefit of paravirt? */
	.align PAGE_SIZE
ENTRY(initial_page_table)
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
	.long	0,0
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
# else
#  error "Kernel PMDs should be 1, 2 or 3"
# endif
	.align PAGE_SIZE		/* needs to be page-sized too */
ENTRY(stack_start)
	.long init_thread_union+THREAD_SIZE
	.asciz "Unknown interrupt or fault at: %p %p %p\n"

	.ascii "BUG: Int %d: CR2 %p\n"
	/* regs pushed in early_idt_handler: */
	.ascii " EDI %p ESI %p EBP %p EBX %p\n"
	.ascii " ESP %p ES %p DS %p\n"
	.ascii " EDX %p ECX %p EAX %p\n"
	.ascii " vec %p err %p EIP %p CS %p flg %p\n"
	.ascii "Stack: %p %p %p %p %p %p %p %p\n"
	.ascii "       %p %p %p %p %p %p %p %p\n"
	.asciz "       %p %p %p %p %p %p %p %p\n"
#include "../../x86/xen/xen-head.S"
 * The IDT and GDT 'descriptors' are a strange 48-bit object
 * only used by the lidt and lgdt instructions. They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size and a 32-bit linear address value:
 */

	.globl boot_gdt_descr

	# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32-bit align gdt_desc.address
boot_gdt_descr:
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET

	.word 0				# 32-bit align idt_desc.address
idt_descr:
	.word IDT_ENTRIES*8-1		# idt contains 256 entries
	.long idt_table

	# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32-bit align gdt_desc.address
ENTRY(early_gdt_descr)
	.word GDT_ENTRIES*8-1
	.long gdt_page			/* Overwritten for secondary CPUs */
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
	.align L1_CACHE_BYTES
ENTRY(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */
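/*
 * Both descriptors decode to base 0 and limit 0xfffff with 4 KB granularity,
 * i.e. a 4 GB flat segment; access byte 0x9a is a present, DPL 0, readable
 * code segment and 0x92 a present, DPL 0, writable data segment.
 */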