[PATCH v2] x86/pv: Fix assertions in svm_load_segs()
OSSTest has shown an assertion failure:

  http://logs.test-lab.xenproject.org/osstest/logs/153906/test-xtf-amd64-amd64-1/serial-rimava1.log

This is because we pass a non-NUL selector into svm_load_segs(), which is
something we must not do, as this path does not load the attributes/limit
from the GDT/LDT.

Drop the {fs,gs}_sel parameters from svm_load_segs() and use 0 instead.
This is fine, even for non-zero NUL segments, as it is how the IRET
instruction behaves on all CPUs.

Only use the svm_load_segs() path when FS and GS are NUL, which is the
common case when scheduling a 64bit vcpu with 64bit userspace in context.

Fixes: ad0fd291c5 ("x86/pv: Rewrite segment context switching from scratch")
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>

v2:
 * Rewrite from scratch.
---
 xen/arch/x86/domain.c             | 7 +++----
 xen/arch/x86/hvm/svm/svm.c        | 9 +++------
 xen/include/asm-x86/hvm/svm/svm.h | 6 ++++--
 3 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index d4f4ced681..e8e91cf080 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1557,7 +1557,7 @@ static void load_segments(struct vcpu *n)
                    : [_val] "rm" (val) )
 
 #ifdef CONFIG_HVM
-    if ( cpu_has_svm && !compat )
+    if ( cpu_has_svm && !compat && (uregs->fs | uregs->gs) <= 3 )
     {
         unsigned long gsb = n->arch.flags & TF_kernel_mode
             ? n->arch.pv.gs_base_kernel : n->arch.pv.gs_base_user;
@@ -1565,8 +1565,7 @@ static void load_segments(struct vcpu *n)
             ? n->arch.pv.gs_base_user : n->arch.pv.gs_base_kernel;
 
         fs_gs_done = svm_load_segs(n->arch.pv.ldt_ents, LDT_VIRT_START(n),
-                                   uregs->fs, n->arch.pv.fs_base,
-                                   uregs->gs, gsb, gss);
+                                   n->arch.pv.fs_base, gsb, gss);
     }
 #endif
     if ( !fs_gs_done )
@@ -1929,7 +1928,7 @@ static void __context_switch(void)
     /* Prefetch the VMCB if we expect to use it later in the context switch */
     if ( cpu_has_svm && is_pv_domain(nd) && !is_pv_32bit_domain(nd) &&
          !is_idle_domain(nd) )
-        svm_load_segs(0, 0, 0, 0, 0, 0, 0);
+        svm_load_segs(0, 0, 0, 0, 0);
 #endif
 
     if ( need_full_gdt(nd) && !per_cpu(full_gdt_loaded, cpu) )
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index af584ff5d1..23b2a2aa17 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1521,8 +1521,7 @@ static void svm_init_erratum_383(const struct cpuinfo_x86 *c)
 
 #ifdef CONFIG_PV
 bool svm_load_segs(unsigned int ldt_ents, unsigned long ldt_base,
-                   unsigned int fs_sel, unsigned long fs_base,
-                   unsigned int gs_sel, unsigned long gs_base,
+                   unsigned long fs_base, unsigned long gs_base,
                    unsigned long gs_shadow)
 {
     unsigned int cpu = smp_processor_id();
@@ -1559,14 +1558,12 @@ bool svm_load_segs(unsigned int ldt_ents, unsigned long ldt_base,
         vmcb->ldtr.base = ldt_base;
     }
 
-    ASSERT(!(fs_sel & ~3));
-    vmcb->fs.sel = fs_sel;
+    vmcb->fs.sel = 0;
     vmcb->fs.attr = 0;
     vmcb->fs.limit = 0;
     vmcb->fs.base = fs_base;
 
-    ASSERT(!(gs_sel & ~3));
-    vmcb->gs.sel = gs_sel;
+    vmcb->gs.sel = 0;
     vmcb->gs.attr = 0;
     vmcb->gs.limit = 0;
     vmcb->gs.base = gs_base;
diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
index d568e86db9..2310878e41 100644
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -52,10 +52,12 @@ void svm_update_guest_cr(struct vcpu *, unsigned int cr, unsigned int flags);
 /*
  * PV context switch helper.  Calls with zero ldt_base request a prefetch of
  * the VMCB area to be loaded from, instead of an actual load of state.
+ *
+ * Must only be used for NUL FS/GS, as the segment attributes/limits are not
+ * read from the GDT/LDT.
  */
 bool svm_load_segs(unsigned int ldt_ents, unsigned long ldt_base,
-                   unsigned int fs_sel, unsigned long fs_base,
-                   unsigned int gs_sel, unsigned long gs_base,
+                   unsigned long fs_base, unsigned long gs_base,
                    unsigned long gs_shadow);
 
 extern u32 svm_feature_flags;
-- 
2.11.0
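
A note on the new guard in load_segments(): an x86 segment selector is laid
out as index[15:3] | TI[2] | RPL[1:0], and a NUL selector has a zero index
and TI bit, so the only bits that may be set are the two RPL bits.  Hence
"(uregs->fs | uregs->gs) <= 3" is true exactly when both selectors are NUL
(possibly with a non-zero RPL).  A minimal standalone sketch of the same
check follows; the helper name is illustrative and not taken from the patch:

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative only: a selector is index[15:3] | TI[2] | RPL[1:0].
 * A NUL selector has a zero index and TI bit, so at most the two RPL
 * bits (i.e. a value <= 3) can be set.
 */
static bool both_selectors_are_nul(uint16_t fs_sel, uint16_t gs_sel)
{
    /* Mirrors the "(uregs->fs | uregs->gs) <= 3" guard in load_segments(). */
    return (fs_sel | gs_sel) <= 3;
}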