[Xen-devel] [PATCH 1/4] x86/domctl: Implement XEN_DOMCTL_{get,set}_vcpu_msrs
Despite my 'Reviewed-by' tag on c/s 65e3554908 "x86/PV: support data
breakpoint extension registers", I have re-evaluated my position as far as
the hypercall interface is concerned.

Previously, for the sake of not modifying the migration code in libxc,
XEN_DOMCTL_get_ext_vcpucontext would jump through hoops to return -ENOBUFS
if and only if MSRs were in use and no buffer was present.

This is fragile, and awkward from a toolstack point-of-view when actually
sending MSR content in the migration stream.  It also complicates fixing a
further race condition, between querying the number of MSRs for a vcpu and
the vcpu touching a new one.

As this code is still only in staging, take this opportunity to redesign
the interface.  This patch introduces the brand new
XEN_DOMCTL_{get,set}_vcpu_msrs subops.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
---
 xen/arch/x86/domctl.c       |  126 +++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/domctl.h |   31 +++++++++++
 2 files changed, 157 insertions(+)

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 1285dd0..c0c2d1b 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1341,6 +1341,132 @@ long arch_do_domctl(
     }
     break;
 
+    case XEN_DOMCTL_get_vcpu_msrs:
+    case XEN_DOMCTL_set_vcpu_msrs:
+    {
+        struct xen_domctl_vcpu_msrs *vmsrs = &domctl->u.vcpu_msrs;
+        struct xen_domctl_vcpu_msr msr;
+        struct vcpu *v;
+        uint32_t nr_msrs = 0;
+
+        ret = -ESRCH;
+        if ( (vmsrs->vcpu >= d->max_vcpus) ||
+             ((v = d->vcpu[vmsrs->vcpu]) == NULL) )
+            break;
+
+        ret = -EINVAL;
+        if ( (v == current) || /* no vcpu_pause() */
+             !is_pv_domain(d) )
+            break;
+
+        /* Count maximum number of optional msrs. */
+        if ( boot_cpu_has(X86_FEATURE_DBEXT) )
+            nr_msrs += 4;
+
+        if ( domctl->cmd == XEN_DOMCTL_get_vcpu_msrs )
+        {
+            /* NULL guest handle is a request for max size. */
+            if ( guest_handle_is_null(vmsrs->msrs) ||
+                 (vmsrs->msr_count < nr_msrs) )
+            {
+                vmsrs->msr_count = nr_msrs;
+                ret = guest_handle_is_null(vmsrs->msrs) ? 0 : -ENOBUFS;
+            }
+            else
+            {
+                ret = i = 0;
+
+                vcpu_pause(v);
+
+                if ( boot_cpu_has(X86_FEATURE_DBEXT) )
+                {
+                    unsigned int j;
+
+                    if ( v->arch.pv_vcpu.dr_mask[0] )
+                    {
+                        msr.index = MSR_AMD64_DR0_ADDRESS_MASK;
+                        msr.reserved = 0;
+                        msr.value = v->arch.pv_vcpu.dr_mask[0];
+                        if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
+                            ret = -EFAULT;
+                        else
+                            ++i;
+                    }
+
+                    for ( j = 0; j < 3; ++j )
+                    {
+                        if ( !v->arch.pv_vcpu.dr_mask[1 + j] )
+                            continue;
+                        if ( !ret )
+                        {
+                            msr.index = MSR_AMD64_DR1_ADDRESS_MASK + j;
+                            msr.reserved = 0;
+                            msr.value = v->arch.pv_vcpu.dr_mask[1 + j];
+                            if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
+                                ret = -EFAULT;
+                            else
+                                ++i;
+                        }
+                    }
+
+                }
+
+                vcpu_unpause(v);
+
+                /* Check we didn't lie to userspace then overflow the buffer */
+                BUG_ON(i > nr_msrs);
+                vmsrs->msr_count = i;
+            }
+
+            copyback = 1;
+        }
+        else
+        {
+            ret = -EINVAL;
+            if ( vmsrs->msr_count > nr_msrs )
+                break;
+
+            vcpu_pause(v);
+
+            for ( i = 0; i < vmsrs->msr_count; ++i )
+            {
+                ret = -EFAULT;
+                if ( copy_from_guest_offset(&msr, vmsrs->msrs, i, 1) )
+                    break;
+
+                ret = -EINVAL;
+                if ( msr.reserved )
+                    break;
+
+                switch ( msr.index )
+                {
+                case MSR_AMD64_DR0_ADDRESS_MASK:
+                    if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
+                         (msr.value >> 32) )
+                        break;
+                    v->arch.pv_vcpu.dr_mask[0] = msr.value;
+                    continue;
+
+                case MSR_AMD64_DR1_ADDRESS_MASK ...
+                     MSR_AMD64_DR3_ADDRESS_MASK:
+                    if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
+                         (msr.value >> 32) )
+                        break;
+                    msr.index -= MSR_AMD64_DR1_ADDRESS_MASK - 1;
+                    v->arch.pv_vcpu.dr_mask[msr.index] = msr.value;
+                    continue;
+                }
+                break;
+            }
+
+            vcpu_unpause(v);
+
+            if ( i == vmsrs->msr_count )
+                ret = 0;
+        }
+    }
+    break;
+
     default:
         ret = iommu_do_domctl(domctl, d, u_domctl);
         break;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 385b053..7a13e25 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -895,6 +895,34 @@ struct xen_domctl_cacheflush {
 typedef struct xen_domctl_cacheflush xen_domctl_cacheflush_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_cacheflush_t);
 
+#if defined(__i386__) || defined(__x86_64__)
+struct xen_domctl_vcpu_msr {
+    uint32_t         index;
+    uint32_t         reserved;
+    uint64_aligned_t value;
+};
+typedef struct xen_domctl_vcpu_msr xen_domctl_vcpu_msr_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msr_t);
+
+/*
+ * XEN_DOMCTL_set_vcpu_msrs / XEN_DOMCTL_get_vcpu_msrs.
+ *
+ * For GET, a NULL 'msrs' guest handle is a request for the maximum
+ * 'msr_count' which is required to save the vcpu state.
+ *
+ * When actually getting the MSRs, 'msr_count' shall reflect the actual
+ * number of MSRs saved, which might be fewer than the maximum, if some
+ * are not currently used by the vcpu.
+ */
+struct xen_domctl_vcpu_msrs {
+    uint32_t vcpu;                                   /* IN     */
+    uint32_t msr_count;                              /* IN/OUT */
+    XEN_GUEST_HANDLE_64(xen_domctl_vcpu_msr_t) msrs; /* IN/OUT */
+};
+typedef struct xen_domctl_vcpu_msrs xen_domctl_vcpu_msrs_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msrs_t);
+#endif
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -965,6 +993,8 @@ struct xen_domctl {
 #define XEN_DOMCTL_getnodeaffinity               69
 #define XEN_DOMCTL_set_max_evtchn                70
 #define XEN_DOMCTL_cacheflush                    71
+#define XEN_DOMCTL_get_vcpu_msrs                 72
+#define XEN_DOMCTL_set_vcpu_msrs                 73
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1014,6 +1044,7 @@ struct xen_domctl {
 #if defined(__i386__) || defined(__x86_64__)
         struct xen_domctl_cpuid             cpuid;
         struct xen_domctl_vcpuextstate      vcpuextstate;
+        struct xen_domctl_vcpu_msrs         vcpu_msrs;
 #endif
         struct xen_domctl_set_access_required access_required;
         struct xen_domctl_audit_p2m         audit_p2m;
-- 
1.7.10.4
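
[Editorial note: a minimal sketch, not part of the patch, of the two-call
GET pattern described by the header comment, as a toolstack might drive it.
do_domctl() is a hypothetical stand-in for the toolstack's hypercall
wrapper; a real implementation (e.g. libxc's) would also bounce the buffer
into hypercall-safe memory, which is elided here.]

/* Hypothetical toolstack-side sketch; do_domctl() is assumed, not real. */
#include <errno.h>
#include <stdlib.h>

#include <xen/domctl.h>  /* struct xen_domctl, xen_domctl_vcpu_msr_t */

/* Assumed wrapper: issues the domctl hypercall, returns 0 or -errno. */
int do_domctl(struct xen_domctl *domctl);

static int get_vcpu_msrs(domid_t domid, uint32_t vcpu,
                         xen_domctl_vcpu_msr_t **msrs, uint32_t *count)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_get_vcpu_msrs,
        .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
        .domain = domid,
        .u.vcpu_msrs.vcpu = vcpu,
    };
    xen_domctl_vcpu_msr_t *buf;
    int rc;

    /* Call 1: NULL guest handle asks Xen for the maximum msr_count. */
    set_xen_guest_handle_raw(domctl.u.vcpu_msrs.msrs, NULL);
    if ( (rc = do_domctl(&domctl)) != 0 )
        return rc;

    if ( domctl.u.vcpu_msrs.msr_count == 0 )
    {
        *msrs = NULL;
        *count = 0;
        return 0;
    }

    if ( (buf = calloc(domctl.u.vcpu_msrs.msr_count, sizeof(*buf))) == NULL )
        return -ENOMEM;

    /*
     * Call 2: same msr_count, real buffer.  Xen writes back the number of
     * MSRs actually saved, which may be fewer than the maximum.
     */
    set_xen_guest_handle_raw(domctl.u.vcpu_msrs.msrs, buf);
    if ( (rc = do_domctl(&domctl)) != 0 )
    {
        free(buf);
        return rc;
    }

    *msrs = buf;
    *count = domctl.u.vcpu_msrs.msr_count;
    return 0;
}

An -ENOBUFS from the second call would indicate the maximum grew between
the two calls; for a vcpu paused for migration that cannot happen, which is
exactly the race the redesigned interface makes tractable.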
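[Editorial note: the SET direction, as the receiving side of a migration
might use it, is symmetric.  The same assumptions apply (hypothetical
do_domctl(), hypercall-safe buffer handling elided).  Per the patch, Xen
fails the call with -EINVAL for an over-large msr_count, a nonzero
reserved field, or an MSR index it does not recognise.]

static int set_vcpu_msrs(domid_t domid, uint32_t vcpu,
                         xen_domctl_vcpu_msr_t *msrs, uint32_t count)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_set_vcpu_msrs,
        .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
        .domain = domid,
        .u.vcpu_msrs.vcpu = vcpu,
        .u.vcpu_msrs.msr_count = count,
    };

    /* Hand Xen the array of records saved by get_vcpu_msrs() above. */
    set_xen_guest_handle_raw(domctl.u.vcpu_msrs.msrs, msrs);

    return do_domctl(&domctl);
}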