Re: [Xen-devel] [PATCH V4 2/4] x86/xsaves: enable xsaves/xrstors in xen
>>> On 25.08.15 at 12:54, <shuai.ruan@xxxxxxxxxxxxxxx> wrote:
> @@ -896,9 +897,28 @@ long arch_do_domctl(
> ret = -EFAULT;
>
> offset += sizeof(v->arch.xcr0_accum);
> - if ( !ret && copy_to_guest_offset(evc->buffer, offset,
> - (void *)v->arch.xsave_area,
> - size - 2 * sizeof(uint64_t)) )
> +
> + if ( cpu_has_xsaves )
> + {
There is no point in entering this code when ret != 0.
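I.e. I'd expect the guard to be along the lines of (just a sketch)

    if ( !ret && cpu_has_xsaves )

so that neither the allocation nor the copy happens once an earlier
error was recorded.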
> + xsave_area = xmalloc_bytes(size);
> + if ( !xsave_area )
> + {
> + ret = -ENOMEM;
> + goto vcpuextstate_out;
As can even be seen from the patch context, this bypasses the
vcpu_unpause().
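A minimal fix (untested sketch) would be to unpause before bailing

            ret = -ENOMEM;
            vcpu_unpause(v);
            goto vcpuextstate_out;

but restructuring things so that the error path still goes through the
common vcpu_unpause() would seem preferable.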
> + }
> +
> + save_xsave_states(v, xsave_area,
> + evc->size - 2*sizeof(uint64_t));
> +
> + if ( !ret && copy_to_guest_offset(evc->buffer, offset,
> + xsave_area, size -
> + 2 * sizeof(uint64_t)) )
> + ret = -EFAULT;
> + xfree(xsave_area);
> + }
> + else if ( !ret && copy_to_guest_offset(evc->buffer, offset,
> + (void *)v->arch.xsave_area,
> + size - 2 * sizeof(uint64_t)) )
> ret = -EFAULT;
>
> vcpu_unpause(v);
> @@ -954,8 +974,12 @@ long arch_do_domctl(
> v->arch.xcr0_accum = _xcr0_accum;
> if ( _xcr0_accum & XSTATE_NONLAZY )
> v->arch.nonlazy_xstate_used = 1;
> - memcpy(v->arch.xsave_area, _xsave_area,
> - evc->size - 2 * sizeof(uint64_t));
> + if ( cpu_has_xsaves )
> + load_xsave_states(v, (void *)_xsave_area,
Casts like this and ...
> + evc->size - 2*sizeof(uint64_t));
> + else
> + memcpy(v->arch.xsave_area, (void *)_xsave_area,
> + evc->size - 2 * sizeof(uint64_t));
> vcpu_unpause(v);
> }
> else
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2148,8 +2148,12 @@ static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
> ctxt->xfeature_mask = xfeature_mask;
> ctxt->xcr0 = v->arch.xcr0;
> ctxt->xcr0_accum = v->arch.xcr0_accum;
> - memcpy(&ctxt->save_area, v->arch.xsave_area,
> - size - offsetof(struct hvm_hw_cpu_xsave, save_area));
> + if ( cpu_has_xsaves )
> + save_xsave_states(v, (void *)&ctxt->save_area,
... this and ...
> + size - offsetof(struct hvm_hw_cpu_xsave,save_area));
> + else
> + memcpy(&ctxt->save_area, v->arch.xsave_area,
> + size - offsetof(struct hvm_hw_cpu_xsave, save_area));
> }
>
> return 0;
> @@ -2248,9 +2252,14 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
> v->arch.xcr0_accum = ctxt->xcr0_accum;
> if ( ctxt->xcr0_accum & XSTATE_NONLAZY )
> v->arch.nonlazy_xstate_used = 1;
> - memcpy(v->arch.xsave_area, &ctxt->save_area,
> - min(desc->length, size) - offsetof(struct hvm_hw_cpu_xsave,
> - save_area));
> + if ( cpu_has_xsaves )
> + load_xsave_states(v, (void *)&ctxt->save_area,
... this are bogus and should be avoided if at all possible.
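Giving the helpers suitably typed parameters would make the casts go
away altogether; e.g. (hypothetical declarations - the function names
are the patch's, the parameter types are merely my suggestion)

    void save_xsave_states(struct vcpu *v, void *dest, unsigned int size);
    void load_xsave_states(struct vcpu *v, const void *src, unsigned int size);

with which save_xsave_states(v, &ctxt->save_area, ...) and friends
compile without any cast.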
> --- a/xen/arch/x86/i387.c
> +++ b/xen/arch/x86/i387.c
> @@ -309,7 +309,11 @@ int vcpu_init_fpu(struct vcpu *v)
> return rc;
>
> if ( v->arch.xsave_area )
> + {
> v->arch.fpu_ctxt = &v->arch.xsave_area->fpu_sse;
> + if ( cpu_has_xsaves )
Why (only) dependent on XSAVES and not (also) XSAVEC?
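Both XSAVEC and XSAVES use the compacted area format, so I'd expect
something like

    if ( cpu_has_xsaves || cpu_has_xsavec )

here (assuming a cpu_has_xsavec predicate is, or gets, made available).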
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -936,9 +936,10 @@ void pv_cpuid(struct cpu_user_regs *regs)
> if ( regs->_ecx == 1 )
> {
> a &= XSTATE_FEATURE_XSAVEOPT |
> - XSTATE_FEATURE_XSAVEC |
> - (cpu_has_xgetbv1 ? XSTATE_FEATURE_XGETBV1 : 0) |
> - (cpu_has_xsaves ? XSTATE_FEATURE_XSAVES : 0);
> + XSTATE_FEATURE_XSAVEC;
> + /* PV guest will not support xsaves. */
> + /* (cpu_has_xgetbv1 ? XSTATE_FEATURE_XGETBV1 : 0) |
> + (cpu_has_xsaves ? XSTATE_FEATURE_XSAVES : 0); */
As already said to Andrew - fine for XSAVES, but why also for
XGETBV1?
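I.e. I'd expect only the XSAVES bit to be dropped, roughly (sketch)

            a &= XSTATE_FEATURE_XSAVEOPT |
                 XSTATE_FEATURE_XSAVEC |
                 (cpu_has_xgetbv1 ? XSTATE_FEATURE_XGETBV1 : 0);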
> --- a/xen/arch/x86/xstate.c
> +++ b/xen/arch/x86/xstate.c
> @@ -214,6 +214,11 @@ void xsave(struct vcpu *v, uint64_t mask)
> typeof(ptr->fpu_sse.fip.sel) fcs = ptr->fpu_sse.fip.sel;
> typeof(ptr->fpu_sse.fdp.sel) fds = ptr->fpu_sse.fdp.sel;
>
> + if ( cpu_has_xsaves )
> + asm volatile ( ".byte 0x48,0x0f,0xc7,0x2f"
> + : "=m" (*ptr)
> + : "a" (lmask), "d" (hmask), "D" (ptr) );
> + else
> if ( cpu_has_xsaveopt )
Same question as above - why not also use XSAVEC when
available?
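I.e. when XSAVES can't be used I'd expect XSAVEC to be tried next,
along the lines of (untested sketch; 0x27 is the xsavec64 ModRM byte
for (%rdi), analogous to the 0x2f used above for xsaves64, and
cpu_has_xsavec is again an assumed predicate)

    if ( cpu_has_xsaves )
        asm volatile ( ".byte 0x48,0x0f,0xc7,0x2f" /* xsaves64 (%rdi) */
                       : "=m" (*ptr)
                       : "a" (lmask), "d" (hmask), "D" (ptr) );
    else if ( cpu_has_xsavec )
        asm volatile ( ".byte 0x48,0x0f,0xc7,0x27" /* xsavec64 (%rdi) */
                       : "=m" (*ptr)
                       : "a" (lmask), "d" (hmask), "D" (ptr) );
    else if ( cpu_has_xsaveopt )
        ...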
Jan