|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] vMCE vs migration
On Mon, Feb 13, Jan Beulich wrote:
> >>> On 10.02.12 at 22:28, Olaf Hering <olaf@xxxxxxxxx> wrote:
> > These functions are called for dom0, but not for domU. And as a result
> > arch.nr_vmce_banks remains zero. I assume the guest needs to be
> > initialized in some way as well, and that does not happen?
>
> Below/attached a fixed version of the patch.
I get a mismatch after migration; both hosts run the same Xen binary.
The small debug patch I use is attached.
Also: the tools do not catch the restore error, so the guest continues to
run on the source host.
Olaf
nicolai login: (XEN) vmce_init_vcpu 0 o 0 n 806
(XEN) vmce_init_vcpu 1 o 0 n 806
(XEN) vmce_init_vcpu 2 o 0 n 806
(XEN) vmce_init_vcpu 3 o 0 n 806
(XEN) save.c:62:d0 HVM restore (1): VM saved on one CPU (0x206c2) and restored on another (0x10676).
(XEN) save.c:234:d0 HVM restore: CPU 0
(XEN) save.c:234:d0 HVM restore: CPU 1
(XEN) save.c:234:d0 HVM restore: CPU 2
(XEN) save.c:234:d0 HVM restore: CPU 3
(XEN) save.c:234:d0 HVM restore: PIC 0
(XEN) save.c:234:d0 HVM restore: PIC 1
(XEN) save.c:234:d0 HVM restore: IOAPIC 0
(XEN) save.c:234:d0 HVM restore: LAPIC 0
(XEN) save.c:234:d0 HVM restore: LAPIC 1
(XEN) save.c:234:d0 HVM restore: LAPIC 2
(XEN) save.c:234:d0 HVM restore: LAPIC 3
(XEN) save.c:234:d0 HVM restore: LAPIC_REGS 0
(XEN) save.c:234:d0 HVM restore: LAPIC_REGS 1
(XEN) save.c:234:d0 HVM restore: LAPIC_REGS 2
(XEN) save.c:234:d0 HVM restore: LAPIC_REGS 3
(XEN) save.c:234:d0 HVM restore: PCI_IRQ 0
(XEN) save.c:234:d0 HVM restore: ISA_IRQ 0
(XEN) save.c:234:d0 HVM restore: PCI_LINK 0
(XEN) save.c:234:d0 HVM restore: PIT 0
(XEN) save.c:234:d0 HVM restore: RTC 0
(XEN) save.c:234:d0 HVM restore: HPET 0
(XEN) save.c:234:d0 HVM restore: PMTIMER 0
(XEN) save.c:234:d0 HVM restore: MTRR 0
(XEN) save.c:234:d0 HVM restore: MTRR 1
(XEN) save.c:234:d0 HVM restore: MTRR 2
(XEN) save.c:234:d0 HVM restore: MTRR 3
(XEN) save.c:234:d0 HVM restore: VMCE_VCPU 0
(XEN) save.c:291:d0 HVM restore mismatch: expected type 18 length 8, saw type
18 length 1
(XEN) vmce.c:360:d0 vmce_load_vcpu_ctxt ffff82c4802c7d28 ffffffff -1 o 806 n ea
(XEN) save.c:239:d0 HVM restore: failed to load entry 18/0
(XEN) vmce_init_vcpu 0 o 0 n 806
(XEN) vmce_init_vcpu 1 o 0 n 806
(XEN) vmce_init_vcpu 2 o 0 n 806
(XEN) vmce_init_vcpu 3 o 0 n 806
(XEN) save.c:62:d0 HVM restore (2): VM saved on one CPU (0x206c2) and restored
on another (0x10676).
(XEN) save.c:234:d0 HVM restore: CPU 0
(XEN) save.c:234:d0 HVM restore: CPU 1
(XEN) save.c:234:d0 HVM restore: CPU 2
(XEN) save.c:234:d0 HVM restore: CPU 3
(XEN) save.c:234:d0 HVM restore: PIC 0
(XEN) save.c:234:d0 HVM restore: PIC 1
(XEN) save.c:234:d0 HVM restore: IOAPIC 0
(XEN) save.c:234:d0 HVM restore: LAPIC 0
(XEN) save.c:234:d0 HVM restore: LAPIC 1
(XEN) save.c:234:d0 HVM restore: LAPIC 2
(XEN) save.c:234:d0 HVM restore: LAPIC 3
(XEN) save.c:234:d0 HVM restore: LAPIC_REGS 0
(XEN) save.c:234:d0 HVM restore: LAPIC_REGS 1
(XEN) save.c:234:d0 HVM restore: LAPIC_REGS 2
(XEN) save.c:234:d0 HVM restore: LAPIC_REGS 3
(XEN) save.c:234:d0 HVM restore: PCI_IRQ 0
(XEN) save.c:234:d0 HVM restore: ISA_IRQ 0
(XEN) save.c:234:d0 HVM restore: PCI_LINK 0
(XEN) save.c:234:d0 HVM restore: PIT 0
(XEN) save.c:234:d0 HVM restore: RTC 0
(XEN) save.c:234:d0 HVM restore: HPET 0
(XEN) save.c:234:d0 HVM restore: PMTIMER 0
(XEN) save.c:234:d0 HVM restore: MTRR 0
(XEN) save.c:234:d0 HVM restore: MTRR 1
(XEN) save.c:234:d0 HVM restore: MTRR 2
(XEN) save.c:234:d0 HVM restore: MTRR 3
(XEN) save.c:234:d0 HVM restore: VMCE_VCPU 0
(XEN) vmce.c:360:d0 vmce_load_vcpu_ctxt ffff83082e377d28 0 0 o 806 n 1809
(XEN) vmce.c:77: HVM restore: unsupported MCA capabilities 0x1809 for d2:v0
(supported: 0x800)
(XEN) save.c:239:d0 HVM restore: failed to load entry 18/0
diff -r cbb1cce5fac0 xen/arch/x86/cpu/mcheck/mce.c
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -743,6 +743,7 @@ int mca_cap_init(void)
{
int i;
+ printk("%s: nr_mce_banks %x\n", __func__, nr_mce_banks);
mca_allbanks = mcabanks_alloc();
for ( i = 0; i < nr_mce_banks; i++)
mcabanks_set(i, mca_allbanks);
diff -r cbb1cce5fac0 xen/arch/x86/cpu/mcheck/vmce.c
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -63,6 +63,7 @@ void vmce_destroy_msr(struct domain *d)
void vmce_init_vcpu(struct vcpu *v)
{
+ printk("%s %u o %lx n %lx\n", __func__, v->vcpu_id, v->arch.mcg_cap, g_mcg_cap);
v->arch.mcg_cap = g_mcg_cap;
}
@@ -331,6 +332,7 @@ static int vmce_save_vcpu_ctxt(struct do
};
err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
+ gdprintk(XENLOG_ERR, "%s %p %u %x %d o %lx\n", __func__, h, v->vcpu_id, err, err, ctxt.caps);
if ( err )
break;
}
@@ -352,8 +354,11 @@ static int vmce_load_vcpu_ctxt(struct do
err = -EINVAL;
}
else
+ {
err = hvm_load_entry(VMCE_VCPU, h, &ctxt);
+ gdprintk(XENLOG_ERR, "%s %p %x %d o %lx n %lx\n", __func__, h, err, err, v->arch.mcg_cap, ctxt.caps);
+ }
return err ?: vmce_restore_vcpu(v, ctxt.caps);
}
diff -r cbb1cce5fac0 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1027,6 +1027,7 @@ long arch_do_domctl(
evc->syscall32_callback_eip = 0;
evc->syscall32_disables_events = 0;
}
+ gdprintk(XENLOG_ERR, "%s %u n %lx o %lx\n", __func__, v->vcpu_id, v->arch.mcg_cap, evc->mcg_cap);
evc->mcg_cap = v->arch.mcg_cap;
}
else
@@ -1061,6 +1062,7 @@ long arch_do_domctl(
evc->syscall32_callback_eip )
goto ext_vcpucontext_out;
+ gdprintk(XENLOG_ERR, "%s %u n %lx o %lx\n", __func__, v->vcpu_id, v->arch.mcg_cap, evc->mcg_cap);
if ( evc->size >= offsetof(typeof(*evc), mcg_cap) +
sizeof(evc->mcg_cap) )
ret = vmce_restore_vcpu(v, evc->mcg_cap);
diff -r cbb1cce5fac0 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3129,6 +3129,8 @@ void do_general_protection(struct cpu_us
{
dprintk(XENLOG_INFO, "GPF (%04x): %p -> %p\n",
regs->error_code, _p(regs->eip), _p(fixup));
+ printk ("%s: %p ", __func__, _p(regs->eip)); print_symbol("%s\n", regs->eip);
+ printk ("%s: %p ", __func__, _p(fixup)); print_symbol("%s\n", fixup);
regs->eip = fixup;
return;
}
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support. |