[Xen-devel] [PATCH v2] x86: consolidate legacy FPU state loading
First of all, introduce a helper function instead of replicating almost the
same code for PV and HVM. The differences between the two pieces of code
actually point out an issue (which is also addressed here): In the HVM case
FCW would not have been set to FCW_RESET in certain cases (note for example
that XRSTOR loads FCW_DEFAULT rather than FCW_RESET when the respective
xstate_bv bit is clear).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: hvm_vcpu_reset_state() also uses the new function.

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -834,8 +834,6 @@ int arch_set_info_guest(
            return -EINVAL;
     }
 
-    v->fpu_initialised = !!(flags & VGCF_I387_VALID);
-
     v->arch.flags &= ~TF_kernel_mode;
     if ( (flags & VGCF_in_kernel) || is_hvm_domain(d)/*???*/ )
         v->arch.flags |= TF_kernel_mode;
@@ -842,27 +840,9 @@
 
     v->arch.vgc_flags = flags;
 
-    if ( flags & VGCF_I387_VALID )
-    {
-        memcpy(v->arch.fpu_ctxt, &c.nat->fpu_ctxt, sizeof(c.nat->fpu_ctxt));
-        if ( v->arch.xsave_area )
-            v->arch.xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
-    }
-    else if ( v->arch.xsave_area )
-    {
-        v->arch.xsave_area->xsave_hdr.xstate_bv = 0;
-        v->arch.xsave_area->fpu_sse.mxcsr = MXCSR_DEFAULT;
-    }
-    else
-    {
-        typeof(v->arch.xsave_area->fpu_sse) *fpu_sse = v->arch.fpu_ctxt;
-
-        memset(fpu_sse, 0, sizeof(*fpu_sse));
-        fpu_sse->fcw = FCW_DEFAULT;
-        fpu_sse->mxcsr = MXCSR_DEFAULT;
-    }
-    if ( v->arch.xsave_area )
-        v->arch.xsave_area->xsave_hdr.xcomp_bv = 0;
+    vcpu_load_fpu(v, v->arch.xsave_area,
+                  flags & VGCF_I387_VALID ? &c.nat->fpu_ctxt : NULL,
+                  FCW_DEFAULT);
 
     if ( !compat )
     {
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -981,7 +981,6 @@ static int hvm_load_cpu_ctxt(struct doma
     struct hvm_hw_cpu ctxt;
     struct segment_register seg;
     const char *errstr;
-    struct xsave_struct *xsave_area;
 
     /* Which vcpu is this? */
     vcpuid = hvm_load_instance(h);
@@ -1114,22 +1113,9 @@ static int hvm_load_cpu_ctxt(struct doma
     hvm_set_segment_register(v, x86_seg_ldtr, &seg);
 
     /* Cover xsave-absent save file restoration on xsave-capable host. */
-    xsave_area = xsave_enabled(v) ? NULL : v->arch.xsave_area;
-
-    v->fpu_initialised = !!(ctxt.flags & XEN_X86_FPU_INITIALISED);
-    if ( v->fpu_initialised )
-    {
-        memcpy(v->arch.fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
-        if ( xsave_area )
-            xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
-    }
-    else if ( xsave_area )
-    {
-        xsave_area->xsave_hdr.xstate_bv = 0;
-        xsave_area->fpu_sse.mxcsr = MXCSR_DEFAULT;
-    }
-    if ( xsave_area )
-        xsave_area->xsave_hdr.xcomp_bv = 0;
+    vcpu_load_fpu(v, xsave_enabled(v) ? NULL : v->arch.xsave_area,
+                  ctxt.flags & XEN_X86_FPU_INITIALISED ? ctxt.fpu_regs : NULL,
+                  FCW_RESET);
 
     v->arch.user_regs.rax = ctxt.rax;
     v->arch.user_regs.rbx = ctxt.rbx;
@@ -3879,7 +3865,6 @@ void hvm_vcpu_reset_state(struct vcpu *v
 {
     struct domain *d = v->domain;
     struct segment_register reg;
-    typeof(v->arch.xsave_area->fpu_sse) *fpu_ctxt = v->arch.fpu_ctxt;
 
     domain_lock(d);
 
@@ -3893,14 +3878,9 @@ void hvm_vcpu_reset_state(struct vcpu *v
         v->arch.guest_table = pagetable_null();
     }
 
-    memset(fpu_ctxt, 0, sizeof(*fpu_ctxt));
-    fpu_ctxt->fcw = FCW_RESET;
-    fpu_ctxt->mxcsr = MXCSR_DEFAULT;
     if ( v->arch.xsave_area )
-    {
-        v->arch.xsave_area->xsave_hdr.xstate_bv = X86_XCR0_FP;
-        v->arch.xsave_area->xsave_hdr.xcomp_bv = 0;
-    }
+        v->arch.xsave_area->xsave_hdr.xstate_bv = 0;
+    vcpu_load_fpu(v, v->arch.xsave_area, NULL, FCW_RESET);
 
     v->arch.vgc_flags = VGCF_online;
     memset(&v->arch.user_regs, 0, sizeof(v->arch.user_regs));
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -337,6 +337,49 @@ int vcpu_init_fpu(struct vcpu *v)
     return rc;
 }
 
+void vcpu_load_fpu(struct vcpu *v, struct xsave_struct *xsave_area,
+                   const void *data, unsigned int fcw_default)
+{
+    /*
+     * For the entire function please note that vcpu_init_fpu() (above) points
+     * v->arch.fpu_ctxt into v->arch.xsave_area when XSAVE is available. Hence
+     * accesses through both pointers alias one another, and the shorter form
+     * is used here.
+     */
+    typeof(xsave_area->fpu_sse) *fpu_sse = v->arch.fpu_ctxt;
+
+    ASSERT(!xsave_area || xsave_area == v->arch.xsave_area);
+
+    v->fpu_initialised = !!data;
+
+    if ( data )
+    {
+        memcpy(fpu_sse, data, sizeof(*fpu_sse));
+        if ( xsave_area )
+            xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
+    }
+    else if ( xsave_area && fcw_default == FCW_DEFAULT )
+    {
+        xsave_area->xsave_hdr.xstate_bv = 0;
+        fpu_sse->mxcsr = MXCSR_DEFAULT;
+    }
+    else
+    {
+        memset(fpu_sse, 0, sizeof(*fpu_sse));
+        fpu_sse->fcw = fcw_default;
+        fpu_sse->mxcsr = MXCSR_DEFAULT;
+        if ( v->arch.xsave_area )
+        {
+            v->arch.xsave_area->xsave_hdr.xstate_bv &= ~XSTATE_FP_SSE;
+            if ( fcw_default != FCW_DEFAULT )
+                v->arch.xsave_area->xsave_hdr.xstate_bv |= X86_XCR0_FP;
+        }
+    }
+
+    if ( xsave_area )
+        xsave_area->xsave_hdr.xcomp_bv = 0;
+}
+
 /* Free FPU's context save area */
 void vcpu_destroy_fpu(struct vcpu *v)
 {
--- a/xen/include/asm-x86/i387.h
+++ b/xen/include/asm-x86/i387.h
@@ -34,5 +34,8 @@ void vcpu_save_fpu(struct vcpu *v);
 void save_fpu_enable(void);
 int vcpu_init_fpu(struct vcpu *v);
+struct xsave_struct;
+void vcpu_load_fpu(struct vcpu *v, struct xsave_struct *xsave_area,
+                   const void *data, unsigned int fcw_default);
 void vcpu_destroy_fpu(struct vcpu *v);
 
 #endif /* __ASM_I386_I387_H */
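Purely as an illustration of the xstate_bv point made in the description (this is not part of the patch, and the structures below are toy stand-ins for the real xsave header and legacy FPU area): a minimal standalone sketch, with constant values taken from Xen's headers, of why the helper keeps X86_XCR0_FP set in xstate_bv when a caller asks for a non-default FCW while supplying no register image. With that bit clear, a subsequent XRSTOR would reinitialise FCW to FCW_DEFAULT instead of the requested value.

/*
 * Standalone illustration only; not part of the patch. The constants mirror
 * Xen's definitions, the structs are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FCW_DEFAULT   0x037f   /* FCW after FINIT, or XRSTOR with FP bit clear */
#define FCW_RESET     0x0040   /* FCW architecturally defined at CPU reset */
#define MXCSR_DEFAULT 0x1f80
#define X86_XCR0_FP   (1u << 0)
#define X86_XCR0_SSE  (1u << 1)
#define XSTATE_FP_SSE (X86_XCR0_FP | X86_XCR0_SSE)

struct toy_fpu  { uint16_t fcw; uint32_t mxcsr; };
struct toy_xhdr { uint64_t xstate_bv, xcomp_bv; };

/* Mirror of the helper's case analysis for the "no guest image" paths. */
static void load_default(struct toy_fpu *fpu, struct toy_xhdr *hdr,
                         unsigned int fcw_default)
{
    if ( hdr && fcw_default == FCW_DEFAULT )
    {
        /* XRSTOR with the FP/SSE bits clear already yields FCW_DEFAULT. */
        hdr->xstate_bv = 0;
        fpu->mxcsr = MXCSR_DEFAULT;
    }
    else
    {
        memset(fpu, 0, sizeof(*fpu));
        fpu->fcw = fcw_default;
        fpu->mxcsr = MXCSR_DEFAULT;
        if ( hdr )
        {
            hdr->xstate_bv &= ~(uint64_t)XSTATE_FP_SSE;
            if ( fcw_default != FCW_DEFAULT )
                /* Keep FP marked valid so XRSTOR loads the requested FCW. */
                hdr->xstate_bv |= X86_XCR0_FP;
        }
    }
    if ( hdr )
        hdr->xcomp_bv = 0;
}

int main(void)
{
    struct toy_fpu fpu = { 0 };
    struct toy_xhdr hdr = { 0 };

    load_default(&fpu, &hdr, FCW_RESET);
    /* Prints: fcw=0x40 xstate_bv=0x1 */
    printf("fcw=%#x xstate_bv=%#llx\n", (unsigned int)fpu.fcw,
           (unsigned long long)hdr.xstate_bv);
    return 0;
}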