|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH for-4.20 4/4] x86/fpu: Split fpu_setup_fpu() in two
It's doing too many things at once and there's no clear way of defining what
it's meant to do. This patch splits the function in two.
1. A reset function, parameterized by the FCW value. FCW_RESET means to reset
the state to power-on reset values, while FCW_DEFAULT means to reset to the
default values present during vCPU creation.
2. A x87/SSE state loader (equivalent to the old function when it took a data
pointer).
Signed-off-by: Alejandro Vallejo <alejandro.vallejo@xxxxxxxxx>
---
I'm still not sure what the old function tries to do. The state we start vCPUs
in is _similar_ to the state after `finit`, but it's not quite the same (`ftw`
is not -1). I went for the "let's not deviate too much from previous behaviour"
option, but maybe we did intend for vCPUs to start as if `finit` had just been
executed?
---
xen/arch/x86/domain.c | 7 +++--
xen/arch/x86/hvm/hvm.c | 19 ++++++++-----
xen/arch/x86/i387.c | 50 +++++++++++----------------------
xen/arch/x86/include/asm/i387.h | 27 +++++++++++++++---
4 files changed, 56 insertions(+), 47 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index ccadfe0c9e70..245899cc792f 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1198,9 +1198,10 @@ int arch_set_info_guest(
is_pv_64bit_domain(d) )
v->arch.flags &= ~TF_kernel_mode;
- vcpu_setup_fpu(v, v->arch.xsave_area,
- flags & VGCF_I387_VALID ? &c.nat->fpu_ctxt : NULL,
- FCW_DEFAULT);
+ if ( flags & VGCF_I387_VALID )
+ vcpu_setup_fpu(v, &c.nat->fpu_ctxt);
+ else
+ vcpu_reset_fpu(v, FCW_DEFAULT);
if ( !compat )
{
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 09b1426ee314..bedbd2a0b888 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1162,10 +1162,17 @@ static int cf_check hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
seg.attr = ctxt.ldtr_arbytes;
hvm_set_segment_register(v, x86_seg_ldtr, &seg);
- /* Cover xsave-absent save file restoration on xsave-capable host. */
- vcpu_setup_fpu(v, xsave_enabled(v) ? NULL : v->arch.xsave_area,
- ctxt.flags & XEN_X86_FPU_INITIALISED ? ctxt.fpu_regs : NULL,
- FCW_RESET);
+    /*
+     * On Xen 4.1 and later the FPU state is restored on a later HVM context,
+     * so what we're doing here is initialising the FPU state for guests from
+     * even older versions of Xen. In general such guests only use legacy
+     * x87/SSE state, and if they did use XSAVE then our best-effort strategy
+     * is to make an XSAVE header for x87 and SSE hoping that's good enough.
+     */
+ if ( ctxt.flags & XEN_X86_FPU_INITIALISED )
+ vcpu_setup_fpu(v, &ctxt.fpu_regs);
+ else
+ vcpu_reset_fpu(v, FCW_RESET);
v->arch.user_regs.rax = ctxt.rax;
v->arch.user_regs.rbx = ctxt.rbx;
@@ -4005,9 +4012,7 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
v->arch.guest_table = pagetable_null();
}
- if ( v->arch.xsave_area )
- v->arch.xsave_area->xsave_hdr.xstate_bv = 0;
- vcpu_setup_fpu(v, v->arch.xsave_area, NULL, FCW_RESET);
+ vcpu_reset_fpu(v, FCW_RESET);
arch_vcpu_regs_init(v);
v->arch.user_regs.rip = ip;
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index a964b84757ec..7851f1b3f6e4 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -310,41 +310,25 @@ int vcpu_init_fpu(struct vcpu *v)
return xstate_alloc_save_area(v);
}
-void vcpu_setup_fpu(struct vcpu *v, struct xsave_struct *xsave_area,
- const void *data, unsigned int fcw_default)
+void vcpu_reset_fpu(struct vcpu *v, uint16_t fcw)
{
- fpusse_t *fpu_sse = &v->arch.xsave_area->fpu_sse;
-
- ASSERT(!xsave_area || xsave_area == v->arch.xsave_area);
-
- v->fpu_initialised = !!data;
-
- if ( data )
- {
- memcpy(fpu_sse, data, sizeof(*fpu_sse));
- if ( xsave_area )
- xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
- }
- else if ( xsave_area && fcw_default == FCW_DEFAULT )
- {
- xsave_area->xsave_hdr.xstate_bv = 0;
- fpu_sse->mxcsr = MXCSR_DEFAULT;
- }
- else
- {
- memset(fpu_sse, 0, sizeof(*fpu_sse));
- fpu_sse->fcw = fcw_default;
- fpu_sse->mxcsr = MXCSR_DEFAULT;
- if ( v->arch.xsave_area )
- {
- v->arch.xsave_area->xsave_hdr.xstate_bv &= ~XSTATE_FP_SSE;
- if ( fcw_default != FCW_DEFAULT )
- v->arch.xsave_area->xsave_hdr.xstate_bv |= X86_XCR0_X87;
- }
- }
+ v->fpu_initialised = false;
+ *v->arch.xsave_area = (struct xsave_struct) {
+ .fpu_sse = {
+ .mxcsr = MXCSR_DEFAULT,
+ .fcw = fcw,
+ },
+ .xsave_hdr.xstate_bv = fcw == FCW_RESET ? X86_XCR0_X87 : 0,
+ };
+}
- if ( xsave_area )
- xsave_area->xsave_hdr.xcomp_bv = 0;
+void vcpu_setup_fpu(struct vcpu *v, const void *data)
+{
+ v->fpu_initialised = true;
+ *v->arch.xsave_area = (struct xsave_struct) {
+ .fpu_sse = *(fpusse_t*)data,
+ .xsave_hdr.xstate_bv = XSTATE_FP_SSE,
+ };
}
/* Free FPU's context save area */
diff --git a/xen/arch/x86/include/asm/i387.h b/xen/arch/x86/include/asm/i387.h
index a783549db991..ce699fc66663 100644
--- a/xen/arch/x86/include/asm/i387.h
+++ b/xen/arch/x86/include/asm/i387.h
@@ -31,10 +31,29 @@ void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts);
void vcpu_restore_fpu_lazy(struct vcpu *v);
void vcpu_save_fpu(struct vcpu *v);
void save_fpu_enable(void);
-
int vcpu_init_fpu(struct vcpu *v);
-struct xsave_struct;
-void vcpu_setup_fpu(struct vcpu *v, struct xsave_struct *xsave_area,
- const void *data, unsigned int fcw_default);
void vcpu_destroy_fpu(struct vcpu *v);
+
+/*
+ * Restore `v`'s FPU to known values
+ *
+ * If fcw == FCW_RESET, then the reset state is power-on RESET.
+ *
+ * Otherwise `mxcsr` is set to `MXCSR_DEFAULT`, `fcw` is overridden with the
+ * `fcw` argument and everything else is zeroed out.
+ *
+ * @param v vCPU containing the FPU
+ * @param fcw Intended FPU Control Word
+ */
+void vcpu_reset_fpu(struct vcpu *v, uint16_t fcw);
+
+/*
+ * Load x87/SSE state into `v`'s FPU
+ *
+ * Overrides the XSAVE header to set the state components to be x87 and SSE.
+ *
+ * @param v vCPU containing the FPU
+ * @param data 512-octet blob for x87/SSE state
+ */
+void vcpu_setup_fpu(struct vcpu *v, const void *data);
#endif /* __ASM_I386_I387_H */
--
2.34.1
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |