# HG changeset patch
# User Wei Huang
# Date 1304696422 18000
# Node ID ab7d2191421fbf95313e1109de3ced5e2a094424
# Parent 5badff7cda6b4af71991d8150803ac2f7c4a27f5
FPU: add mask parameter to xsave and xrstor

Xen currently hard-codes the mask bits of xsave() and xrstor() to all 1's.
This patch adds a mask parameter to both functions so that callers can
select which extended states to save or restore.

Signed-off-by: Wei Huang

diff -r 5badff7cda6b -r ab7d2191421f xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c	Tue May 03 13:45:26 2011 -0500
+++ b/xen/arch/x86/i387.c	Fri May 06 10:40:22 2011 -0500
@@ -35,14 +35,14 @@
 /*      FPU Restore Functions   */
 /*******************************/
 /* Restore x87 extended state */
-static inline void fpu_xrstor(struct vcpu *v)
+static inline void fpu_xrstor(struct vcpu *v, uint64_t mask)
 {
     /*
      * XCR0 normally represents what guest OS set. In case of Xen itself,
      * we set all supported feature mask before doing save/restore.
      */
     set_xcr0(v->arch.xcr0_accum);
-    xrstor(v);
+    xrstor(v, mask);
     set_xcr0(v->arch.xcr0);
 }
 
@@ -98,13 +98,13 @@
 /*      FPU Save Functions      */
 /*******************************/
 /* Save x87 extended state */
-static inline void fpu_xsave(struct vcpu *v)
+static inline void fpu_xsave(struct vcpu *v, uint64_t mask)
 {
     /* XCR0 normally represents what guest OS set. In case of Xen itself,
      * we set all accumulated feature mask before doing save/restore.
      */
     set_xcr0(v->arch.xcr0_accum);
-    xsave(v);
+    xsave(v, mask);
     set_xcr0(v->arch.xcr0);
 }
 
@@ -174,7 +174,7 @@
         return;
 
     if ( xsave_enabled(v) )
-        fpu_xrstor(v);
+        fpu_xrstor(v, XSTATE_ALL);
     else if ( v->fpu_initialised )
     {
         if ( cpu_has_fxsr )
@@ -204,7 +204,7 @@
     clts();
 
     if ( xsave_enabled(v) )
-        fpu_xsave(v);
+        fpu_xsave(v, XSTATE_ALL);
     else if ( cpu_has_fxsr )
         fpu_fxsave(v);
     else
diff -r 5badff7cda6b -r ab7d2191421f xen/arch/x86/xstate.c
--- a/xen/arch/x86/xstate.c	Tue May 03 13:45:26 2011 -0500
+++ b/xen/arch/x86/xstate.c	Fri May 06 10:40:22 2011 -0500
@@ -51,32 +51,37 @@
     return this_cpu(xcr0);
 }
 
-void xsave(struct vcpu *v)
+void xsave(struct vcpu *v, uint64_t mask)
 {
     struct xsave_struct *ptr = v->arch.xsave_area;
+    uint32_t hmask = mask >> 32;
+    uint32_t lmask = mask;
 
     if ( cpu_has_xsaveopt )
         asm volatile (
             ".byte " REX_PREFIX "0x0f,0xae,0x37"
             :
-            : "a" (-1), "d" (-1), "D"(ptr)
+            : "a" (lmask), "d" (hmask), "D"(ptr)
             : "memory" );
     else
         asm volatile (
             ".byte " REX_PREFIX "0x0f,0xae,0x27"
             :
-            : "a" (-1), "d" (-1), "D"(ptr)
+            : "a" (lmask), "d" (hmask), "D"(ptr)
             : "memory" );
 }
 
-void xrstor(struct vcpu *v)
+void xrstor(struct vcpu *v, uint64_t mask)
 {
+    uint32_t hmask = mask >> 32;
+    uint32_t lmask = mask;
+
     struct xsave_struct *ptr = v->arch.xsave_area;
 
     asm volatile (
         ".byte " REX_PREFIX "0x0f,0xae,0x2f"
         :
-        : "m" (*ptr), "a" (-1), "d" (-1), "D"(ptr) );
+        : "m" (*ptr), "a" (lmask), "d" (hmask), "D"(ptr) );
 }
 
 bool_t xsave_enabled(const struct vcpu *v)
diff -r 5badff7cda6b -r ab7d2191421f xen/include/asm-x86/xstate.h
--- a/xen/include/asm-x86/xstate.h	Tue May 03 13:45:26 2011 -0500
+++ b/xen/include/asm-x86/xstate.h	Fri May 06 10:40:22 2011 -0500
@@ -26,6 +26,10 @@
 #define XSTATE_LWP     (1ULL << 62) /* AMD lightweight profiling */
 #define XSTATE_FP_SSE  (XSTATE_FP | XSTATE_SSE)
 #define XCNTXT_MASK    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_LWP)
+
+#define XSTATE_ALL     (~0)
+#define XSTATE_NONLAZY (XSTATE_LWP)
+#define XSTATE_LAZY    (XSTATE_ALL & ~XSTATE_NONLAZY)
 
 #ifdef CONFIG_X86_64
 #define REX_PREFIX     "0x48, "
@@ -56,8 +60,8 @@
 /* extended state operations */
 void set_xcr0(u64 xfeatures);
 uint64_t get_xcr0(void);
-void xsave(struct vcpu *v);
-void xrstor(struct vcpu *v);
+void xsave(struct vcpu *v, uint64_t mask);
+void xrstor(struct vcpu *v, uint64_t mask);
 bool_t xsave_enabled(const struct vcpu *v);
 
 /* extended state init and cleanup functions */
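For reference, here is a minimal standalone sketch (illustrative code, not part
of the patch, assuming a hosted C environment) of the mask handling the patch
introduces: the 64-bit xstate component mask is split into the EDX:EAX register
pair that the XSAVE/XRSTOR instructions consume, using the same hmask/lmask
split and the same XSTATE_* macro definitions as the patch.

/*
 * Illustrative sketch only -- not Xen code.  Shows how a 64-bit xstate
 * component mask is split into the EDX:EAX register pair consumed by
 * the XSAVE/XRSTOR instructions, mirroring the hmask/lmask split and
 * the XSTATE_* mask macros introduced by this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define XSTATE_FP      (1ULL << 0)
#define XSTATE_SSE     (1ULL << 1)
#define XSTATE_YMM     (1ULL << 2)
#define XSTATE_LWP     (1ULL << 62)  /* AMD lightweight profiling */

#define XSTATE_ALL     (~0)          /* int -1 sign-extends to all 1's
                                      * when widened to uint64_t */
#define XSTATE_NONLAZY (XSTATE_LWP)
#define XSTATE_LAZY    (XSTATE_ALL & ~XSTATE_NONLAZY)

static void show_mask_split(const char *name, uint64_t mask)
{
    uint32_t hmask = mask >> 32;     /* high half, passed in EDX */
    uint32_t lmask = mask;           /* low half, passed in EAX */

    printf("%-14s edx=%08x eax=%08x\n",
           name, (unsigned)hmask, (unsigned)lmask);
}

int main(void)
{
    show_mask_split("XSTATE_ALL", XSTATE_ALL);         /* everything */
    show_mask_split("XSTATE_LAZY", XSTATE_LAZY);       /* all but LWP */
    show_mask_split("XSTATE_NONLAZY", XSTATE_NONLAZY); /* LWP only */
    return 0;
}

The LAZY/NONLAZY split presumably prepares for callers that restore
lazily-switched FPU state on first use while handling components such as LWP
eagerly; this changeset itself still passes XSTATE_ALL at both call sites.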