/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/edd.h>
#include <linux/frame.h>
#include <linux/kexec.h>
#include <linux/slab.h>

#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/hvc-console.h>

#include <asm/paravirt.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/cpuid.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
#include <asm/mach_traps.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/unwind_hints.h>

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <acpi/pdc_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif

#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);
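
/*
 * hypercall_page itself is defined in the startup assembly
 * (xen-head.S): it is a page of hypercall stubs that the hypervisor
 * fills in.  For PVHVM guests, init_hvm_pv_info() below tells Xen
 * where the page lives via an MSR write.
 */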
/*
 * Pointer to the xen_vcpu_info structure or
 * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
 * and xen_vcpu_setup for details. By default it points to
 * shared_info->vcpu_info but if the hypervisor supports
 * VCPUOP_register_vcpu_info then it can point to xen_vcpu_info. The
 * pointer is used in __xen_evtchn_do_upcall to acknowledge pending
 * events.
 * Also more subtly it is used by the patched version of irq enable/disable
 * e.g. xen_irq_enable_direct and xen_iret in PV mode.
 *
 * The desire to be able to do those mask/unmask operations as a single
 * instruction by using the per-cpu offset held in %gs is the real reason
 * vcpu info is in a per-cpu pointer and the original reason for this
 * hypercall.
 */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
/*
 * Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info
 * hypercall. This can be used both in PV and PVHVM mode. The structure
 * overrides the default per_cpu(xen_vcpu, cpu) value.
 */
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long machine_to_phys_nr;
EXPORT_SYMBOL(machine_to_phys_nr);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

void *xen_initial_gdt;

RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

static int xen_cpu_up_prepare(unsigned int cpu);
static int xen_cpu_up_online(unsigned int cpu);
static int xen_cpu_dead(unsigned int cpu);
/*
 * Point at some empty memory to start with. We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs. We assume it is to start with, and then set it to zero on
 * the first failure. This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (ie buffer can't cross a page boundary).
 *
 * Note that any particular CPU may be using a placed vcpu structure,
 * but we can only optimise if they all are.
 *
 * 0: not available, 1: available
 */
static int have_vcpu_info_placement = 1;
struct tls_descs {
	struct desc_struct desc[3];
};

/*
 * Updating the 3 TLS descriptors in the GDT on every task switch is
 * surprisingly expensive so we avoid updating them if they haven't
 * changed. Since Xen writes different descriptors than the ones
 * passed in the update_descriptor hypercall we keep shadow copies to
 * compare against.
 */
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
static void clamp_max_cpus(void)
{
#ifdef CONFIG_SMP
	if (setup_max_cpus > MAX_VIRT_CPUS)
		setup_max_cpus = MAX_VIRT_CPUS;
#endif
}
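
/*
 * Without vcpu info placement, the only per-vcpu state we can reach is
 * the legacy vcpu_info array embedded in the shared_info page, which
 * has exactly MAX_VIRT_CPUS slots; hence the clamp above.
 */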
void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	/*
	 * This path is called twice on PVHVM - first during bootup via
	 * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
	 * hotplugged: cpu_up -> xen_hvm_cpu_notify.
	 * As we can only do the VCPUOP_register_vcpu_info once lets
	 * not over-write its result.
	 *
	 * For PV it is called during restore (xen_vcpu_restore) and bootup
	 * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
	 * use this function.
	 */
	if (xen_hvm_domain()) {
		if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
			return;
	}
	if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
		per_cpu(xen_vcpu, cpu) =
			&HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];

	if (!have_vcpu_info_placement) {
		if (cpu >= MAX_VIRT_CPUS)
			clamp_max_cpus();
		return;
	}

	vcpup = &per_cpu(xen_vcpu_info, cpu);
	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	/*
	 * Check to see if the hypervisor will put the vcpu_info
	 * structure where we want it, which allows direct access via
	 * a percpu variable.
	 *
	 * N.B. This hypercall can _only_ be called once per CPU.
	 * Subsequent calls will error out with -EINVAL. This is due to
	 * the fact that the hypervisor has no unregister variant and
	 * this hypercall does not allow to over-write info.mfn and
	 * info.offset.
	 */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
				 &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
		clamp_max_cpus();
	} else {
		/*
		 * This cpu is using the registered vcpu info, even if
		 * later ones fail to.
		 */
		per_cpu(xen_vcpu, cpu) = vcpup;
	}
}
/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		bool other_cpu = (cpu != smp_processor_id());
		bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu),
						NULL);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
			BUG();

		xen_setup_runstate_info(cpu);

		if (have_vcpu_info_placement)
			xen_vcpu_setup(cpu);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
			BUG();
	}
}
static void __init xen_banner(void)
{
	unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
	struct xen_extraversion extra;

	HYPERVISOR_xen_version(XENVER_extraversion, &extra);

	pr_info("Booting paravirtualized kernel %son %s\n",
		xen_feature(XENFEAT_auto_translated_physmap) ?
			"with PVH extensions " : "", pv_info.name);
	printk(KERN_INFO "Xen version: %d.%d%s%s\n",
	       version >> 16, version & 0xffff, extra.extraversion,
	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ?
			" (preserve-AD)" : "");
}
/* Check if running on Xen version (major, minor) or later */
bool
xen_running_on_version_or_later(unsigned int major, unsigned int minor)
{
	unsigned int version;

	if (!xen_domain())
		return false;

	version = HYPERVISOR_xen_version(XENVER_version, NULL);
	if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
	    ((version >> 16) > major))
		return true;

	return false;
}
#define CPUID_THERM_POWER_LEAF 6
#define APERFMPERF_PRESENT 0

static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;

static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask;
static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;
static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned maskebx = ~0;
	unsigned maskecx = ~0;
	unsigned maskedx = ~0;
	unsigned setecx = 0;

	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	switch (*ax) {
	case 1:
		maskecx = cpuid_leaf1_ecx_mask;
		setecx = cpuid_leaf1_ecx_set_mask;
		maskedx = cpuid_leaf1_edx_mask;
		break;

	case CPUID_MWAIT_LEAF:
		/* Synthesize the values.. */
		*ax = 0;
		*bx = 0;
		*cx = cpuid_leaf5_ecx_val;
		*dx = cpuid_leaf5_edx_val;
		return;

	case CPUID_THERM_POWER_LEAF:
		/* Disabling APERFMPERF for kernel usage */
		maskecx = ~(1 << APERFMPERF_PRESENT);
		break;

	case 0xb:
		/* Suppress extended topology stuff */
		maskebx = 0;
		break;
	}

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));

	*bx &= maskebx;
	*cx &= maskecx;
	*cx |= setecx;
	*dx &= maskedx;
}
STACK_FRAME_NON_STANDARD(xen_cpuid); /* XEN_EMULATE_PREFIX */
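
/*
 * XEN_EMULATE_PREFIX is a magic byte sequence that Xen recognizes in
 * front of an instruction: it forces the cpuid above to trap to the
 * hypervisor for emulation, so the masking logic is applied even
 * though cpuid itself is not a privileged instruction.  The prefix
 * also confuses objtool's stack validation, hence the
 * STACK_FRAME_NON_STANDARD annotation.
 */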
static bool __init xen_check_mwait(void)
{
#ifdef CONFIG_ACPI
	struct xen_platform_op op = {
		.cmd			= XENPF_set_processor_pminfo,
		.u.set_pminfo.id	= -1,
		.u.set_pminfo.type	= XEN_PM_PDC,
	};
	uint32_t buf[3];
	unsigned int ax, bx, cx, dx;
	unsigned int mwait_mask;

	/* We need to determine whether it is OK to expose the MWAIT
	 * capability to the kernel to harvest deeper than C3 states from ACPI
	 * _CST using the processor_harvest_xen.c module. For this to work, we
	 * need to gather the MWAIT_LEAF values (which the cstate.c code
	 * checks against). The hypervisor won't expose the MWAIT flag because
	 * it would break backwards compatibility; so we will find out directly
	 * from the hardware and hypercall.
	 */
	if (!xen_initial_domain())
		return false;

	/*
	 * When running under a platform earlier than Xen 4.2, do not expose
	 * mwait, to avoid the risk of loading the native acpi pad driver.
	 */
	if (!xen_running_on_version_or_later(4, 2))
		return false;

	ax = 1;
	cx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
		     (1 << (X86_FEATURE_MWAIT % 32));

	if ((cx & mwait_mask) != mwait_mask)
		return false;

	/* We need to emulate the MWAIT_LEAF and for that we need both
	 * ecx and edx. The hypercall provides only partial information.
	 */

	ax = CPUID_MWAIT_LEAF;
	bx = 0;
	cx = 0;
	dx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	/* Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so,
	 * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
	 */
	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;
	buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);

	set_xen_guest_handle(op.u.set_pminfo.pdc, buf);

	if ((HYPERVISOR_platform_op(&op) == 0) &&
	    (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
		cpuid_leaf5_ecx_val = cx;
		cpuid_leaf5_edx_val = dx;
	}
	return true;
#else
	return false;
#endif
}
static void __init xen_init_cpuid_mask(void)
{
	unsigned int ax, bx, cx, dx;
	unsigned int xsave_mask;

	cpuid_leaf1_edx_mask =
		~((1 << X86_FEATURE_MTRR) |  /* disable MTRR */
		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	/*
	 * Xen PV would need some work to support PCID: CR3 handling as well
	 * as xen_flush_tlb_others() would need updating.
	 */
	cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_PCID % 32));  /* disable PCID */

	if (!xen_initial_domain())
		cpuid_leaf1_edx_mask &=
			~((1 << X86_FEATURE_ACPI));  /* disable ACPI */

	cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));

	ax = 1;
	cx = 0;
	cpuid(1, &ax, &bx, &cx, &dx);

	xsave_mask =
		(1 << (X86_FEATURE_XSAVE % 32)) |
		(1 << (X86_FEATURE_OSXSAVE % 32));

	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
	if ((cx & xsave_mask) != xsave_mask)
		cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
	if (xen_check_mwait())
		cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
}
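
/*
 * The "% 32" in the masks above is because X86_FEATURE_* constants
 * encode (cpuid word * 32 + bit); taking the value mod 32 recovers
 * the bit position within the single 32-bit register (ecx or edx)
 * being masked.
 */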
static void __init xen_init_capabilities(void)
{
	setup_force_cpu_cap(X86_FEATURE_XENPV);
}
static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

static void xen_end_context_switch(struct task_struct *next)
{
	xen_mc_flush();
	paravirt_end_context_switch(next);
}

static unsigned long xen_store_tr(void)
{
	return 0;
}
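
/*
 * There is no usable task register under Xen PV (the hypervisor owns
 * the real TSS), so store_tr simply reports 0.
 */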
/*
 * Set the page permissions for a particular virtual address. If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;
	unsigned char dummy;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	/*
	 * Careful: update_va_mapping() will fail if the virtual address
	 * we're poking isn't populated in the page tables. We don't
	 * need to worry about the direct map (that's always in the page
	 * tables), but we need to be careful about vmap space. In
	 * particular, the top level page table can lazily propagate
	 * entries between processes, so if we've switched mms since we
	 * vmapped the target in the first place, we might not have the
	 * top-level page table entry populated.
	 *
	 * We disable preemption because we want the same mm active when
	 * we probe the target and when we issue the hypercall. We'll
	 * have the same nominal mm, but if we're a kernel thread, lazy
	 * mm dropping could change our pgd.
	 *
	 * Out of an abundance of caution, this uses __get_user() to fault
	 * in the target address just in case there's some obscure case
	 * in which the target address isn't readable.
	 */

	preempt_disable();

	probe_kernel_read(&dummy, v, 1);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();

	preempt_enable();
}
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	/*
	 * We need to mark all the aliases of the LDT pages RO. We
	 * don't need to call vm_flush_aliases(), though, since that's
	 * only responsible for flushing aliases out the TLBs, not the
	 * page tables, and Xen will flush the TLB for us if needed.
	 *
	 * To avoid confusing future readers: none of this is necessary
	 * to load the LDT. The hypervisor only checks this when the
	 * LDT is faulted in due to subsequent descriptor access.
	 */

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}
static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	trace_xen_cpu_set_ldt(addr, entries);

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
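
/*
 * The multicall helpers used above (xen_mc_entry()/xen_mc_issue(),
 * from multicalls.h) batch hypercalls into a per-cpu buffer.  If a
 * lazy mode (e.g. PARAVIRT_LAZY_CPU) is active, xen_mc_issue() leaves
 * the call in the buffer to be flushed later; otherwise it flushes
 * immediately.  This turns a burst of paravirt ops, such as those
 * issued during a context switch, into a single hypervisor
 * transition.
 */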
static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages..
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		int level;
		pte_t *ptep;
		unsigned long pfn, mfn;
		void *virt;

		/*
		 * The GDT is per-cpu and is in the percpu data area.
		 * That can be virtually mapped, so we need to do a
		 * page-walk to get the underlying MFN for the
		 * hypercall. The page can also be in the kernel's
		 * linear range, so we need to RO that mapping too.
		 */
		ptep = lookup_address(va, &level);
		BUG_ON(ptep == NULL);

		pfn = pte_pfn(*ptep);
		mfn = pfn_to_mfn(pfn);
		virt = __va(PFN_PHYS(pfn));

		frames[f] = mfn;

		make_lowmem_page_readonly((void *)va);
		make_lowmem_page_readonly(virt);
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}
/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages..
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		pte_t pte;
		unsigned long pfn, mfn;

		pfn = virt_to_pfn(va);
		mfn = pfn_to_mfn(pfn);

		pte = pfn_pte(pfn, PAGE_KERNEL_RO);

		if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
			BUG();

		frames[f] = mfn;
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}
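
/*
 * Since the boot GDT is only mapped once, the boot variant can derive
 * the frame with virt_to_pfn() instead of a page-table walk, and a
 * single HYPERVISOR_update_va_mapping() call is enough to make it
 * read-only: there are no aliases to fix up yet.
 */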
static inline bool desc_equal(const struct desc_struct *d1,
			      const struct desc_struct *d2)
{
	return d1->a == d2->a && d1->b == d2->b;
}
static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
	struct desc_struct *gdt;
	xmaddr_t maddr;
	struct multicall_space mc;

	if (desc_equal(shadow, &t->tls_array[i]))
		return;

	*shadow = t->tls_array[i];

	gdt = get_cpu_gdt_table(cpu);
	maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}
static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone
	 * and lazy gs handling is enabled, it means we're in a
	 * context switch, and %gs has just been saved. This means we
	 * can zero it out to prevent faults on exit from the
	 * hypervisor if the next process has no %gs. Either way, it
	 * has been saved, and the new value will get loaded properly.
	 * This will go away as soon as Xen has been modified to not
	 * save/restore %gs for normal hypercalls.
	 *
	 * On x86_64, this hack is not used for %gs, because gs points
	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
	 * must not zero %gs on x86_64.
	 *
	 * For x86_64, we need to zero %fs, otherwise we may get an
	 * exception between the new %fs descriptor being loaded and
	 * %fs being effectively cleared at __switch_to().
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
#ifdef CONFIG_X86_64
static void xen_load_gs_index(unsigned int idx)
{
	if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
		BUG();
}
#endif
static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{
	xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
	u64 entry = *(u64 *)ptr;

	trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}
static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{
	unsigned long addr;

	if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
		return 0;

	info->vector = vector;

	addr = gate_offset(*val);
#ifdef CONFIG_X86_64
	/*
	 * Look for known traps using IST, and substitute them
	 * appropriately. The debugger ones are the only ones we care
	 * about. Xen will handle faults like double_fault,
	 * so we should never see them. Warn if
	 * there's an unexpected IST-using fault handler.
	 */
	if (addr == (unsigned long)debug)
		addr = (unsigned long)xen_debug;
	else if (addr == (unsigned long)int3)
		addr = (unsigned long)xen_int3;
	else if (addr == (unsigned long)stack_segment)
		addr = (unsigned long)xen_stack_segment;
	else if (addr == (unsigned long)double_fault) {
		/* Don't need to handle these */
		return 0;
#ifdef CONFIG_X86_MCE
	} else if (addr == (unsigned long)machine_check) {
		/*
		 * when xen hypervisor inject vMCE to guest,
		 * use native mce handler to handle it
		 */
		return 0;
#endif
	} else if (addr == (unsigned long)nmi)
		/*
		 * Use the native version as well.
		 */
		return 0;
	else {
		/* Some other trap using IST? */
		if (WARN_ON(val->ist != 0))
			return 0;
	}
#endif	/* CONFIG_X86_64 */
	info->address = addr;

	info->cs = gate_segment(*val);
	info->flags = val->dpl;
	/* interrupt gates clear IF */
	if (val->type == GATE_INTERRUPT)
		info->flags |= 1 << 2;

	return 1;
}
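
/*
 * A return of 1 means "pass this gate on to Xen's virtual trap
 * table"; 0 means Xen itself is expected to deliver the vector (or
 * the gate type is unsupported).  In trap_info.flags, the low bits
 * carry the privilege level and bit 2 asks Xen to disable event
 * delivery (the virtual IF) on entry, mirroring a hardware interrupt
 * gate.
 */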
/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/*
 * Set an IDT entry. If the entry is part of the current IDT, then
 * also update Xen.
 */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	trace_xen_cpu_write_idt_entry(dt, entrynum, g);

	preempt_disable();

	start = __this_cpu_read(idt_desc.address);
	end = start + __this_cpu_read(idt_desc.size) + 1;

	xen_mc_flush();

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, g, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}
static void xen_convert_trap_info(const struct desc_ptr *desc,
				  struct trap_info *traps)
{
	unsigned in, out, count;

	count = (desc->size+1) / sizeof(gate_desc);
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		gate_desc *entry = (gate_desc *)(desc->address) + in;

		if (cvt_gate_to_trap(in, entry, &traps[out]))
			out++;
	}
	traps[out].address = 0;
}
void xen_copy_trap_info(struct trap_info *traps)
{
	const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);

	xen_convert_trap_info(desc, traps);
}
/*
 * Load a new IDT into Xen. In principle this can be per-CPU, so we
 * hold a spinlock to protect the static traps[] array (static because
 * it avoids allocation, and saves stack space).
 */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	trace_xen_cpu_load_idt(desc);

	spin_lock(&lock);

	memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));

	xen_convert_trap_info(desc, traps);

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}
/*
 * Write a GDT descriptor entry. Ignore LDT descriptors, since
 * they're handled differently.
 */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}

	}

	preempt_enable();
}
/*
 * Version of write_gdt_entry for use at early boot-time needed to
 * update an entry as simply as possible.
 */
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
					    const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			dt[entry] = *(struct desc_struct *)desc;
	}

	}
}
static void xen_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
	tss->x86_tss.sp0 = thread->sp0;
}
void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

static void xen_io_delay(void)
{
}
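
/*
 * The (mask >> 12) & 3 above extracts EFLAGS.IOPL, which lives in
 * bits 12-13.  A PV kernel cannot set EFLAGS.IOPL itself, so it asks
 * Xen to track the I/O privilege level on its behalf.  xen_io_delay()
 * is deliberately empty: the traditional port-0x80 write used for
 * I/O delays would only trap to the hypervisor.
 */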
static void xen_clts(void)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

static unsigned long xen_read_cr0(void)
{
	unsigned long cr0 = this_cpu_read(xen_cr0_value);

	if (unlikely(cr0 == 0)) {
		cr0 = native_read_cr0();
		this_cpu_write(xen_cr0_value, cr0);
	}

	return cr0;
}
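
/*
 * %cr0 is cached per cpu because reading the real register traps to
 * the hypervisor; after the first (trapping) native_read_cr0() the
 * cache is kept coherent by xen_write_cr0() below, which is the only
 * path that changes cr0 (and then only the TS bit).
 */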
static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	this_cpu_write(xen_cr0_value, cr0);

	/* Only pay attention to cr0.TS; everything else is
	   ignored. */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
static void xen_write_cr4(unsigned long cr4)
{
	cr4 &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PCE);

	native_write_cr4(cr4);
}
#ifdef CONFIG_X86_64
static inline unsigned long xen_read_cr8(void)
{
	return 0;
}

static inline void xen_write_cr8(unsigned long val)
{
	BUG_ON(val);
}
#endif
static u64 xen_read_msr_safe(unsigned int msr, int *err)
{
	u64 val;

	if (pmu_msr_read(msr, &val, err))
		return val;

	val = native_read_msr_safe(msr, err);
	switch (msr) {
	case MSR_IA32_APICBASE:
#ifdef CONFIG_X86_X2APIC
		if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
#endif
			val &= ~X2APIC_ENABLE;
		break;
	}
	return val;
}
static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
	int ret;

	ret = 0;

	switch (msr) {
#ifdef CONFIG_X86_64
		unsigned which;
		u64 base;

	case MSR_FS_BASE:		which = SEGBASE_FS; goto set;
	case MSR_KERNEL_GS_BASE:	which = SEGBASE_GS_USER; goto set;
	case MSR_GS_BASE:		which = SEGBASE_GS_KERNEL; goto set;

	set:
		base = ((u64)high << 32) | low;
		if (HYPERVISOR_set_segment_base(which, base) != 0)
			ret = -EIO;
		break;
#endif

	case MSR_STAR:
	case MSR_CSTAR:
	case MSR_LSTAR:
	case MSR_SYSCALL_MASK:
	case MSR_IA32_SYSENTER_CS:
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
		/* Fast syscall setup is all done in hypercalls, so
		   these are all ignored. Stub them out here to stop
		   Xen console noise. */
		break;

	default:
		if (!pmu_msr_write(msr, low, high, &ret))
			ret = native_write_msr_safe(msr, low, high);
	}

	return ret;
}
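
/*
 * The segment-base MSRs are routed through
 * HYPERVISOR_set_segment_base() because a 64-bit PV kernel runs
 * deprivileged and cannot program those bases with wrmsr directly.
 * The syscall/sysenter MSRs are silently dropped since Xen sets up
 * those entry points through its own hypercalls.
 */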
static u64 xen_read_msr(unsigned int msr)
{
	/*
	 * This will silently swallow a #GP from RDMSR. It may be worth
	 * changing that.
	 */
	int err;

	return xen_read_msr_safe(msr, &err);
}

static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
{
	/*
	 * This will silently swallow a #GP from WRMSR. It may be worth
	 * changing that.
	 */
	xen_write_msr_safe(msr, low, high);
}
void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}
/* This is called once we have the cpu_possible_mask */
void xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* Set up direct vCPU id mapping for PV guests. */
		per_cpu(xen_vcpu_id, cpu) = cpu;
		xen_vcpu_setup(cpu);
	}

	/*
	 * xen_vcpu_setup managed to place the vcpu_info within the
	 * percpu area for all cpus, so make use of it. Note that for
	 * PVH we want to use the native IRQ mechanism.
	 */
	if (have_vcpu_info_placement && !xen_pvh_domain()) {
		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
	}
}
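
/*
 * The *_direct variants installed above are assembly stubs that
 * operate on the event mask in the per-cpu vcpu_info with a couple of
 * instructions instead of going through a hypercall or the generic C
 * paths; this is exactly the optimisation described in the comment
 * above DEFINE_PER_CPU(xen_vcpu), and it is only safe once every
 * possible CPU has its vcpu_info placed in the per-cpu area.
 */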
static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	char *start, *end, *reloc;
	unsigned ret;

	start = end = reloc = NULL;

#define SITE(op, x)							\
	case PARAVIRT_PATCH(op.x):					\
	if (have_vcpu_info_placement) {					\
		start = (char *)xen_##x##_direct;			\
		end = xen_##x##_direct_end;				\
		reloc = xen_##x##_direct_reloc;				\
	}								\
	goto patch_site

	switch (type) {
		SITE(pv_irq_ops, irq_enable);
		SITE(pv_irq_ops, irq_disable);
		SITE(pv_irq_ops, save_fl);
		SITE(pv_irq_ops, restore_fl);
#undef SITE

	patch_site:
		if (start == NULL || (end-start) > len)
			goto default_patch;

		ret = paravirt_patch_insns(insnbuf, len, start, end);

		/* Note: because reloc is assigned from something that
		   appears to be an array, gcc assumes it's non-null,
		   but doesn't know its relationship with start and
		   end. */
		if (reloc > start && reloc < end) {
			int reloc_off = reloc - start;
			long *relocp = (long *)(insnbuf + reloc_off);
			long delta = start - (char *)addr;

			*relocp += delta;
		}
		break;

	default_patch:
	default:
		ret = paravirt_patch_default(type, clobbers, insnbuf,
					     addr, len);
		break;
	}

	return ret;
}
static const struct pv_info xen_info __initconst = {
	.shared_kernel_pmd = 0,

#ifdef CONFIG_X86_64
	.extra_user_64bit_cs = FLAT_USER_CS64,
#endif
	.name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initconst = {
	.patch = xen_patch,
};
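
/*
 * pv_init_ops.patch is invoked for every paravirt call site at boot:
 * for the four irq-flag operations handled above, xen_patch() copies
 * the hand-written "*_direct" stubs inline (fixing up one
 * relocation), and everything else falls back to
 * paravirt_patch_default().
 */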
static const struct pv_cpu_ops xen_cpu_ops __initconst = {
	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = xen_clts,

	.read_cr0 = xen_read_cr0,
	.write_cr0 = xen_write_cr0,

	.read_cr4 = native_read_cr4,
	.write_cr4 = xen_write_cr4,

#ifdef CONFIG_X86_64
	.read_cr8 = xen_read_cr8,
	.write_cr8 = xen_write_cr8,
#endif

	.wbinvd = native_wbinvd,

	.read_msr = xen_read_msr,
	.write_msr = xen_write_msr,

	.read_msr_safe = xen_read_msr_safe,
	.write_msr_safe = xen_write_msr_safe,

	.read_pmc = xen_read_pmc,

	.iret = xen_iret,
#ifdef CONFIG_X86_64
	.usergs_sysret64 = xen_sysret64,
#endif

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
	.load_gs_index = xen_load_gs_index,
#endif

	.alloc_ldt = xen_alloc_ldt,
	.free_ldt = xen_free_ldt,

	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_sp0 = xen_load_sp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

	/* Xen takes care of %gs when switching to usermode for us */
	.swapgs = paravirt_nop,

	.start_context_switch = paravirt_start_context_switch,
	.end_context_switch = xen_end_context_switch,
};
static void xen_reboot(int reason)
{
	struct sched_shutdown r = { .reason = reason };
	int cpu;

	for_each_online_cpu(cpu)
		xen_pmu_finish(cpu);

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
		BUG();
}
static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	if (!kexec_crash_loaded())
		xen_reboot(SHUTDOWN_crash);
	return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
	.notifier_call = xen_panic_event,
	.priority = INT_MIN
};

int xen_panic_handler_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
	return 0;
}
static const struct machine_ops xen_machine_ops __initconst = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_power_off,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};
static unsigned char xen_get_nmi_reason(void)
{
	unsigned char reason = 0;

	/* Construct a value which looks like it came from port 0x61. */
	if (test_bit(_XEN_NMIREASON_io_error,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_IOCHK;
	if (test_bit(_XEN_NMIREASON_pci_serr,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_SERR;

	return reason;
}
static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
	struct xen_platform_op op;
	struct edd_info *edd_info;
	u32 *mbr_signature;
	unsigned nr;
	int ret;

	edd_info = boot_params.eddbuf;
	mbr_signature = boot_params.edd_mbr_sig_buffer;

	op.cmd = XENPF_firmware_info;

	op.u.firmware_info.type = XEN_FW_DISK_INFO;
	for (nr = 0; nr < EDDMAXNR; nr++) {
		struct edd_info *info = edd_info + nr;

		op.u.firmware_info.index = nr;
		info->params.length = sizeof(info->params);
		set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
				     &info->params);
		ret = HYPERVISOR_platform_op(&op);
		if (ret)
			break;

#define C(x) info->x = op.u.firmware_info.u.disk_info.x
		C(device);
		C(version);
		C(interface_support);
		C(legacy_max_cylinder);
		C(legacy_max_head);
		C(legacy_sectors_per_track);
#undef C
	}
	boot_params.eddbuf_entries = nr;

	op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
	for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
		op.u.firmware_info.index = nr;
		ret = HYPERVISOR_platform_op(&op);
		if (ret)
			break;
		mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
	}
	boot_params.edd_mbr_sig_buf_entries = nr;
#endif
}
/*
 * Set up the GDT and segment registers for -fstack-protector. Until
 * we do this, we have to be careful not to call any stack-protected
 * function, which is most of the kernel.
 *
 * Note that it is __ref because the only caller of this after init
 * is PVH which is not going to use xen_load_gdt_boot or other
 * __init functions.
 */
static void __ref xen_setup_gdt(int cpu)
{
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef CONFIG_X86_64
		unsigned long dummy;

		load_percpu_segment(cpu); /* We need to access per-cpu area */
		switch_to_new_gdt(cpu); /* GDT and GS set */

		/*
		 * We are switching off the Xen provided GDT to our HVM mode
		 * GDT. The new GDT has __KERNEL_CS with CS.L = 1
		 * and we are jumping to reload it.
		 */
		asm volatile (UNWIND_HINT_SAVE
			      "pushq %0\n"
			      "leaq 1f(%%rip),%0\n"
			      "pushq %0\n"
			      "lretq\n"
			      UNWIND_HINT_RESTORE
			      "1:\n"
			      : "=&r" (dummy) : "0" (__KERNEL_CS));

		/*
		 * While not needed, we also set the %es, %ds, and %fs
		 * to zero. We don't care about %ss as it is NULL.
		 * Strictly speaking this is not needed as Xen zeros those
		 * out (and also MSR_FS_BASE, MSR_GS_BASE, MSR_KERNEL_GS_BASE).
		 *
		 * Linux zeros them in cpu_init() and in secondary_startup_64
		 * (for BSP).
		 */
		loadsegment(es, 0);
		loadsegment(ds, 0);
		loadsegment(fs, 0);
#else
		/* PVH: TODO Implement. */
		BUG();
#endif
		return; /* PVH does not need any PV GDT ops. */
	}
	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
	pv_cpu_ops.load_gdt = xen_load_gdt_boot;

	setup_stack_canary_segment(0);
	switch_to_new_gdt(0);

	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
	pv_cpu_ops.load_gdt = xen_load_gdt;
}
#ifdef CONFIG_XEN_PVH
/*
 * A PV guest starts with default flags that are not set for PVH, set them
 * here asap.
 */
static void xen_pvh_set_cr_flags(int cpu)
{
	/* Some of these are setup in 'secondary_startup_64'. The others:
	 * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
	 * (with which PVH shares codepaths), while X86_CR0_PG is for PVH. */
	write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);

	if (!cpu)
		return;
	/*
	 * For the BSP, PSE and PGE are set in probe_page_size_mask(); for APs
	 * set them here. For all, OSFXSR and OSXMMEXCPT are set in
	 * fpu__init_cpu().
	 */
	if (boot_cpu_has(X86_FEATURE_PSE))
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	if (boot_cpu_has(X86_FEATURE_PGE))
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
}

/*
 * Note that it is __ref because the only caller of this after init
 * is PVH which is not going to use xen_load_gdt_boot or other
 * __init functions.
 */
void __ref xen_pvh_secondary_vcpu_init(int cpu)
{
	xen_setup_gdt(cpu);
	xen_pvh_set_cr_flags(cpu);
}

static void __init xen_pvh_early_guest_init(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		return;

	if (!xen_feature(XENFEAT_hvm_callback_vector))
		return;

	xen_have_vector_callback = 1;

	xen_pvh_early_cpu_init(0, false);
	xen_pvh_set_cr_flags(0);

#ifdef CONFIG_X86_32
	BUG(); /* PVH: Implement proper support. */
#endif
}
#endif    /* CONFIG_XEN_PVH */
static void __init xen_dom0_set_legacy_features(void)
{
	x86_platform.legacy.rtc = 1;
}
static int xen_cpuhp_setup(void)
{
	int rc;

	rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
				       "XEN_HVM_GUEST_PREPARE",
				       xen_cpu_up_prepare, xen_cpu_dead);
	if (rc >= 0) {
		rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					       "XEN_HVM_GUEST_ONLINE",
					       xen_cpu_up_online, NULL);
		if (rc < 0)
			cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
	}

	return rc >= 0 ? 0 : rc;
}
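
/*
 * Two hotplug states are registered above: CPUHP_XEN_PREPARE runs on
 * the control CPU before a CPU comes up (and xen_cpu_dead after it
 * goes down), while the dynamic AP-online state runs on the new CPU
 * itself.  If the second registration fails the first is rolled back,
 * so xen_cpuhp_setup() installs either both callbacks or none.
 */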
/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(void)
{
	struct physdev_set_iopl set_iopl;
	unsigned long initrd_start = 0;
	int rc;

	if (!xen_start_info)
		return;

	xen_domain_type = XEN_PV_DOMAIN;

	xen_setup_features();
#ifdef CONFIG_XEN_PVH
	xen_pvh_early_guest_init();
#endif
	xen_setup_machphys_mapping();

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	if (!xen_pvh_domain()) {
		pv_cpu_ops = xen_cpu_ops;

		x86_platform.get_nmi_reason = xen_get_nmi_reason;
	}

	if (xen_feature(XENFEAT_auto_translated_physmap))
		x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
	else
		x86_init.resources.memory_setup = xen_memory_setup;
	x86_init.oem.arch_setup = xen_arch_setup;
	x86_init.oem.banner = xen_banner;

	xen_init_time_ops();

	/*
	 * Set up some pagetable state before starting to set any ptes.
	 */

	xen_init_mmu_ops();

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;

	/*
	 * Prevent page tables from being allocated in highmem, even
	 * if CONFIG_HIGHPTE is enabled.
	 */
	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

	/* Work out if we support NX */
	x86_configure_nx();

	/* Get mfn list */
	xen_build_dynamic_phys_to_machine();

	/*
	 * Set up kernel GDT and segment registers, mainly so that
	 * -fstack-protector code can be executed.
	 */
	xen_setup_gdt(0);

	xen_init_irq_ops();
	xen_init_cpuid_mask();
	xen_init_capabilities();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * set up the basic apic ops.
	 */
	xen_init_apic();
#endif

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * The pages we get from Xen are not related to machine pages, so
	 * any NUMA information the kernel tries to get from ACPI will
	 * be meaningless. Prevent it from trying.
	 */
	acpi_numa = -1;
#endif
	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map and a non-dummy shared_info. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	WARN_ON(xen_cpuhp_setup());

	local_irq_disable();
	early_boot_irqs_disabled = true;

	xen_raw_console_write("mapping kernel into physical memory\n");
	xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
				   xen_start_info->nr_pages);
	xen_reserve_special_pages();

	/* keep using Xen gdt for now; no urgent need to change it */

#ifdef CONFIG_X86_32
	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;
#else
	pv_info.kernel_rpl = 0;
#endif
	/* set the limit of our address space */
	xen_reserve_top();

	/* PVH: runs at default kernel iopl of 0 */
	if (!xen_pvh_domain()) {
		/*
		 * We used to do this in xen_arch_setup, but that is too late
		 * on AMD where early_cpu_init (run before ->arch_setup()) calls
		 * early_amd_init which pokes 0xcf8 port.
		 */
		set_iopl.iopl = 1;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
		if (rc != 0)
			xen_raw_printk("physdev_op failed %d\n", rc);
	}

#ifdef CONFIG_X86_32
	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
	new_cpu_data.wp_works_ok = 1;
	new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
#endif

	if (xen_start_info->mod_start) {
		if (xen_start_info->flags & SIF_MOD_START_PFN)
			initrd_start = PFN_PHYS(xen_start_info->mod_start);
		else
			initrd_start = __pa(xen_start_info->mod_start);
	}

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = initrd_start;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);
	boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;

	if (!xen_initial_domain()) {
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
		if (pci_xen)
			x86_init.pci.arch_init = pci_xen_init;
	} else {
		const struct dom0_vga_console_info *info =
			(void *)((char *)xen_start_info +
				 xen_start_info->console.dom0.info_off);
		struct xen_platform_op op = {
			.cmd = XENPF_firmware_info,
			.interface_version = XENPF_INTERFACE_VERSION,
			.u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
		};

		x86_platform.set_legacy_features =
				xen_dom0_set_legacy_features;
		xen_init_vga(info, xen_start_info->console.dom0.info_size);
		xen_start_info->console.domU.mfn = 0;
		xen_start_info->console.domU.evtchn = 0;

		if (HYPERVISOR_platform_op(&op) == 0)
			boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;

		/* Make sure ACS will be enabled */
		pci_request_acs();

		xen_acpi_sleep_register();

		/* Avoid searching for BIOS MP tables */
		x86_init.mpparse.find_smp_config = x86_init_noop;
		x86_init.mpparse.get_smp_config = x86_init_uint_noop;

		xen_boot_params_init_edd();
	}
#ifdef CONFIG_PCI
	/* PCI BIOS service won't work from a PV guest. */
	pci_probe &= ~PCI_PROBE_BIOS;
#endif
	xen_raw_console_write("about to get started...\n");

	/* Let's presume PV guests always boot on vCPU with id 0. */
	per_cpu(xen_vcpu_id, 0) = 0;

	xen_setup_runstate_info(0);

	xen_efi_init();

	/* Start the world */
#ifdef CONFIG_X86_32
	i386_start_kernel();
#else
	cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}
void __ref xen_hvm_init_shared_info(void)
{
	int cpu;
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page;

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			extend_brk(PAGE_SIZE, PAGE_SIZE);
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page, we use it in the event channel upcall and in some pvclock
	 * related functions. We don't need the vcpu_info placement
	 * optimizations because we don't use any pv_mmu or pv_irq op on
	 * HVM.
	 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
	 * online but xen_hvm_init_shared_info is run at resume time too and
	 * in that case multiple vcpus might be online. */
	for_each_online_cpu(cpu) {
		/* Leave it to be NULL. */
		if (xen_vcpu_nr(cpu) >= MAX_VIRT_CPUS)
			continue;
		per_cpu(xen_vcpu, cpu) =
			&HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
	}
}
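
/*
 * XENMEM_add_to_physmap with XENMAPSPACE_shared_info asks the
 * hypervisor to map its shared_info page at the guest physical frame
 * we chose (a page reserved from the brk section above); this is how
 * an HVM guest, which has no pre-mapped shared_info, obtains one.
 */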
#ifdef CONFIG_XEN_PVHVM
static void __init init_hvm_pv_info(void)
{
	int major, minor;
	uint32_t eax, ebx, ecx, edx, pages, msr, base;
	u64 pfn;

	base = xen_cpuid_base();
	cpuid(base + 1, &eax, &ebx, &ecx, &edx);

	major = eax >> 16;
	minor = eax & 0xffff;
	printk(KERN_INFO "Xen version %d.%d.\n", major, minor);

	cpuid(base + 2, &pages, &msr, &ecx, &edx);

	pfn = __pa(hypercall_page);
	wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));

	xen_setup_features();

	cpuid(base + 4, &eax, &ebx, &ecx, &edx);
	if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
		this_cpu_write(xen_vcpu_id, ebx);
	else
		this_cpu_write(xen_vcpu_id, smp_processor_id());

	pv_info.name = "Xen HVM";

	xen_domain_type = XEN_HVM_DOMAIN;
}
#endif
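
/*
 * The Xen CPUID leaves sit at xen_cpuid_base() (in the 0x40000000
 * hypervisor range): leaf base+1 reports the Xen version, base+2 the
 * number of hypercall pages plus the MSR used to register them, and
 * base+4 HVM-specific data such as the vCPU id used above.
 */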
static int xen_cpu_up_prepare(unsigned int cpu)
{
	int rc;

	if (xen_hvm_domain()) {
		/*
		 * This can happen if CPU was offlined earlier and
		 * offlining timed out in common_cpu_die().
		 */
		if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
			xen_smp_intr_free(cpu);
			xen_uninit_lock_cpu(cpu);
		}

		if (cpu_acpi_id(cpu) != U32_MAX)
			per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
		else
			per_cpu(xen_vcpu_id, cpu) = cpu;
		xen_vcpu_setup(cpu);
	}

	if (xen_pv_domain() ||
	    (xen_have_vector_callback &&
	     xen_feature(XENFEAT_hvm_safe_pvclock)))
		xen_setup_timer(cpu);

	rc = xen_smp_intr_init(cpu);
	if (rc) {
		WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
		     cpu, rc);
		return rc;
	}
	return 0;
}
static int xen_cpu_dead(unsigned int cpu)
{
	xen_smp_intr_free(cpu);

	if (xen_pv_domain() ||
	    (xen_have_vector_callback &&
	     xen_feature(XENFEAT_hvm_safe_pvclock)))
		xen_teardown_timer(cpu);

	return 0;
}

static int xen_cpu_up_online(unsigned int cpu)
{
	xen_init_lock_cpu(cpu);
	return 0;
}
#ifdef CONFIG_XEN_PVHVM
#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
{
	native_machine_shutdown();
	if (kexec_in_progress)
		xen_reboot(SHUTDOWN_soft_reset);
}

static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
	native_machine_crash_shutdown(regs);
	xen_reboot(SHUTDOWN_soft_reset);
}
#endif
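
/*
 * SHUTDOWN_soft_reset asks Xen to reset the domain's hypervisor-side
 * state (event channels, grant tables, shared pages) without
 * destroying the domain, so the kexec'd kernel can start from a
 * clean slate and register everything again.
 */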
static void __init xen_hvm_guest_init(void)
{
	if (xen_pv_domain())
		return;

	init_hvm_pv_info();

	xen_hvm_init_shared_info();

	xen_panic_handler_init();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;
	xen_hvm_smp_init();
	WARN_ON(xen_cpuhp_setup());
	xen_unplug_emulated_devices();
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();
#ifdef CONFIG_KEXEC_CORE
	machine_ops.shutdown = xen_hvm_shutdown;
	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}
#endif
static bool xen_nopv;
static __init int xen_parse_nopv(char *arg)
{
	xen_nopv = true;
	return 0;
}
early_param("xen_nopv", xen_parse_nopv);
static uint32_t __init xen_platform(void)
{
	if (xen_nopv)
		return 0;

	return xen_cpuid_base();
}
bool xen_hvm_need_lapic(void)
{
	if (xen_nopv)
		return false;
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
static void xen_pin_vcpu(int cpu)
{
	static bool disable_pinning;
	struct sched_pin_override pin_override;
	int ret;

	if (disable_pinning)
		return;

	pin_override.pcpu = cpu;
	ret = HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin_override);

	/* Ignore errors when removing override. */
	if (cpu < 0)
		return;

	switch (ret) {
	case -ENOSYS:
		pr_warn("Unable to pin on physical cpu %d. In case of problems consider vcpu pinning.\n",
			cpu);
		disable_pinning = true;
		break;
	case -EPERM:
		WARN(1, "Trying to pin vcpu without having privilege to do so\n");
		disable_pinning = true;
		break;
	case -EINVAL:
	case -EBUSY:
		pr_warn("Physical cpu %d not available for pinning. Check Xen cpu configuration.\n",
			cpu);
		break;
	case 0:
		break;
	default:
		WARN(1, "rc %d while trying to pin vcpu\n", ret);
		disable_pinning = true;
	}
}
const struct hypervisor_x86 x86_hyper_xen = {
	.name			= "Xen",
	.detect			= xen_platform,
#ifdef CONFIG_XEN_PVHVM
	.init_platform		= xen_hvm_guest_init,
#endif
	.x2apic_available	= xen_x2apic_para_available,
	.pin_vcpu		= xen_pin_vcpu,
};
EXPORT_SYMBOL(x86_hyper_xen);
#ifdef CONFIG_HOTPLUG_CPU
void xen_arch_register_cpu(int num)
{
	arch_register_cpu(num);
}
EXPORT_SYMBOL(xen_arch_register_cpu);

void xen_arch_unregister_cpu(int num)
{
	arch_unregister_cpu(num);
}
EXPORT_SYMBOL(xen_arch_unregister_cpu);
#endif