[Xen-devel] [PATCH v3 3/3] libxc/PV: save/restore data breakpoint extension registers
This requires handling the generic MSR extension to
XEN_DOMCTL_[gs]et_ext_vcpucontext.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v3: Split off of hypervisor patch and address review comments:
- check XEN_DOMCTL_get_ext_vcpucontext size output before looking
at msr_count field
- scrub xen_domctl_ext_vcpucontext_t's msrs field on the sending
side
- clear xen_domctl_ext_vcpucontext_t's msr_count field on the
restore side if the size field doesn't cover the msrs one
- make use of hypercall buffer bouncing interfaces on the restore
side (on the save side this would seem to complicate the code
rather than simplify it); see the sketch after this list
- add documenting note to tools/libxc/xg_save_restore.h
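
As a reference for the bouncing point above, here is a minimal sketch of the
pattern, assuming only the libxc-internal helpers already used in the hunks
below (xc_private.h provides DECLARE_DOMCTL, DECLARE_HYPERCALL_BOUNCE and
xc_hypercall_bounce_pre/post); the helper name set_ext_ctxt() and its reduced
argument list are illustrative, not part of this patch:

  #include "xc_private.h" /* libxc-internal: domctl plus hypercall bounce helpers */

  /* Illustrative only: push an extended vcpu context with its MSR array. */
  static int set_ext_ctxt(xc_interface *xch, uint32_t dom,
                          const xen_domctl_ext_vcpucontext_t *evc,
                          xen_domctl_ext_vcpu_msr_t *msrs)
  {
      DECLARE_DOMCTL;
      DECLARE_HYPERCALL_BOUNCE(msrs, evc->msr_count * sizeof(*msrs),
                               XC_HYPERCALL_BUFFER_BOUNCE_IN);
      int rc;

      /* Copy the caller's MSR array into a hypercall-safe buffer. */
      if ( evc->msr_count && xc_hypercall_bounce_pre(xch, msrs) )
          return -1;

      memset(&domctl, 0, sizeof(domctl));
      domctl.cmd = XEN_DOMCTL_set_ext_vcpucontext;
      domctl.domain = dom;
      domctl.u.ext_vcpucontext = *evc;
      if ( evc->msr_count )
          set_xen_guest_handle(domctl.u.ext_vcpucontext.msrs, msrs);

      rc = xc_domctl(xch, &domctl);

      /* Drop the bounce buffer whether or not the domctl succeeded. */
      if ( evc->msr_count )
          xc_hypercall_bounce_post(xch, msrs);
      return rc;
  }

This mirrors the restore-side hunk below; the save side cannot bounce in,
since the MSR data originates from the hypervisor, so it instead allocates a
hypercall buffer with xc_hypercall_buffer_alloc() and retries the domctl, as
shown further down.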
--- a/tools/libxc/xc_domain_restore.c
+++ b/tools/libxc/xc_domain_restore.c
@@ -38,6 +38,7 @@
#include <stdlib.h>
#include <unistd.h>
+#include <assert.h>
#include "xg_private.h"
#include "xg_save_restore.h"
@@ -590,8 +591,13 @@ static int buffer_tail_pv(xc_interface *
uint32_t vcpuextstate_size)
{
unsigned int i;
- size_t pfnlen, vcpulen;
+ size_t pfnlen, vcpulen, total;
+ int alloc = 0;
struct domain_info_context *dinfo = &ctx->dinfo;
+ union {
+ const unsigned char *raw;
+ xen_domctl_ext_vcpucontext_t *evc;
+ } ptr;
/* TODO: handle changing pfntab and vcpu counts */
/* PFN tab */
@@ -634,11 +640,42 @@ static int buffer_tail_pv(xc_interface *
ERROR("Error allocating VCPU ctxt tail buffer");
goto free_pfntab;
}
+ alloc = 1;
}
// DPRINTF("Reading VCPUS: %d bytes\n", vcpulen);
- if ( RDEXACT(fd, buf->vcpubuf, vcpulen) ) {
- PERROR("Error when reading ctxt");
- goto free_vcpus;
+ for ( total = i = 0, ptr.raw = buf->vcpubuf; ext_vcpucontext; ) {
+ if ( RDEXACT(fd, buf->vcpubuf + total, vcpulen) ) {
+ PERROR("Error when reading ctxt");
+ goto free_vcpus;
+ }
+ total += vcpulen;
+ for ( vcpulen = 0; i < buf->vcpucount; ++i ) {
+ size_t msrlen;
+
+ if ( (const unsigned char *)(ptr.evc + 1) > buf->vcpubuf + total )
+ break;
+ if ( ptr.evc->size <
+ (offsetof(xen_domctl_ext_vcpucontext_t, msrs) +
+ sizeof(ptr.evc->msrs)) )
+ ptr.evc->msr_count = 0;
+ msrlen = ptr.evc->msr_count * sizeof(xen_domctl_ext_vcpu_msr_t);
+ vcpulen += msrlen;
+ ptr.raw += 128 + msrlen + vcpuextstate_size;
+ }
+ if ( !vcpulen ) {
+ assert(i == buf->vcpucount);
+ break;
+ }
+ if ( alloc ) {
+ void *nbuf = realloc(buf->vcpubuf, total + vcpulen);
+
+ if ( !nbuf ) {
+ ERROR("Error growing VCPU ctxt tail buffer");
+ goto free_vcpus;
+ }
+ ptr.raw = nbuf + (ptr.raw - buf->vcpubuf);
+ buf->vcpubuf = nbuf;
+ }
}
/* load shared_info_page */
@@ -2130,9 +2167,28 @@ int xc_domain_restore(xc_interface *xch,
goto vcpu_ext_state_restore;
memcpy(&domctl.u.ext_vcpucontext, vcpup, 128);
vcpup += 128;
- domctl.cmd = XEN_DOMCTL_set_ext_vcpucontext;
- domctl.domain = dom;
- frc = xc_domctl(xch, &domctl);
+ if ( domctl.u.ext_vcpucontext.msr_count )
+ {
+ xen_domctl_ext_vcpu_msr_t *msrs = (void *)vcpup;
+ size_t sz = domctl.u.ext_vcpucontext.msr_count * sizeof(*msrs);
+ DECLARE_HYPERCALL_BOUNCE(msrs, sz, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+ if ( xc_hypercall_bounce_pre(xch, msrs) )
+ {
+ PERROR("Can't bounce in vcpu%d MSRs", i);
+ goto out;
+ }
+ set_xen_guest_handle(domctl.u.ext_vcpucontext.msrs, msrs);
+ domctl.cmd = XEN_DOMCTL_set_ext_vcpucontext;
+ domctl.domain = dom;
+ frc = xc_domctl(xch, &domctl);
+ xc_hypercall_bounce_post(xch, msrs);
+ vcpup += sz;
+ } else {
+ domctl.cmd = XEN_DOMCTL_set_ext_vcpucontext;
+ domctl.domain = dom;
+ frc = xc_domctl(xch, &domctl);
+ }
if ( frc != 0 )
{
PERROR("Couldn't set extended vcpu%d info", i);
--- a/tools/libxc/xc_domain_save.c
+++ b/tools/libxc/xc_domain_save.c
@@ -836,6 +836,9 @@ int xc_domain_save(xc_interface *xch, in
/* base of the region in which domain memory is mapped */
unsigned char *region_base = NULL;
+ /* MSR extensions to xen_domctl_ext_vcpucontext_t */
+ DECLARE_HYPERCALL_BUFFER(xen_domctl_ext_vcpu_msr_t, msrs);
+
/* A copy of the CPU eXtended States of the guest. */
DECLARE_HYPERCALL_BUFFER(void, buffer);
@@ -1960,16 +1963,42 @@ int xc_domain_save(xc_interface *xch, in
domctl.domain = dom;
memset(&domctl.u, 0, sizeof(domctl.u));
domctl.u.ext_vcpucontext.vcpu = i;
- if ( xc_domctl(xch, &domctl) < 0 )
+ frc = xc_domctl(xch, &domctl);
+ if ( frc < 0 && errno == ENOBUFS &&
+ domctl.u.ext_vcpucontext.size >=
+ (offsetof(xen_domctl_ext_vcpucontext_t, msrs) +
+ sizeof(domctl.u.ext_vcpucontext.msrs)) &&
+ domctl.u.ext_vcpucontext.msr_count )
+ {
+ msrs = xc_hypercall_buffer_alloc(xch, msrs,
+ domctl.u.ext_vcpucontext.msr_count *
+ sizeof(*msrs));
+ set_xen_guest_handle(domctl.u.ext_vcpucontext.msrs, msrs);
+ frc = msrs ? xc_domctl(xch, &domctl) : -1;
+ /* Don't save the actual pointer. */
+ set_xen_guest_handle_raw(domctl.u.ext_vcpucontext.msrs, NULL);
+ }
+ if ( frc < 0 )
{
PERROR("No extended context for VCPU%d", i);
goto out;
}
if ( wrexact(io_fd, &domctl.u.ext_vcpucontext, 128) )
{
- PERROR("Error when writing to state file (2)");
+ PERROR("Error when writing to state file (ext ctxt)");
goto out;
}
+ if ( msrs )
+ {
+ if ( wrexact(io_fd, msrs,
+ domctl.u.ext_vcpucontext.msr_count * sizeof(*msrs)) )
+ {
+ PERROR("Error when writing to state file (MSRs)");
+ goto out;
+ }
+ xc_hypercall_buffer_free(xch, msrs);
+ msrs = NULL;
+ }
/* Start to fetch CPU eXtended States */
/* Get buffer size first */
@@ -2134,6 +2163,7 @@ int xc_domain_save(xc_interface *xch, in
xc_hypercall_buffer_free_pages(xch, to_send, NRPAGES(bitmap_size(dinfo->p2m_size)));
xc_hypercall_buffer_free_pages(xch, to_skip, NRPAGES(bitmap_size(dinfo->p2m_size)));
+ xc_hypercall_buffer_free(xch, msrs);
free(pfn_type);
free(pfn_batch);
--- a/tools/libxc/xg_save_restore.h
+++ b/tools/libxc/xg_save_restore.h
@@ -62,6 +62,9 @@
* the block size.
* "extv" : Presence indicates use of extended VCPU context in
* tail, data size is 0.
+ * This block may be followed by an array of
+ * xen_domctl_ext_vcpu_msr_t as indicated by
+ * xen_domctl_ext_vcpucontext_t's msr_count field.
*
* p2m (PV-only):
*
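
Illustration only, not part of the patch: with the layout documented above, a
consumer of the tail could read one vcpu's extended-context record roughly as
below, assuming the RDEXACT() macro from xc_domain_restore.c, the
xen_domctl_* types from xen/domctl.h, and <stddef.h>/<stdlib.h>/<string.h>;
the helper name read_one_extv() is made up and error handling is minimal:

  static int read_one_extv(int fd, xen_domctl_ext_vcpucontext_t *evc,
                           xen_domctl_ext_vcpu_msr_t **msrs_out)
  {
      unsigned char rec[128];               /* fixed-size on-wire record */
      xen_domctl_ext_vcpu_msr_t *msrs = NULL;

      if ( RDEXACT(fd, rec, sizeof(rec)) )
          return -1;
      /* The structure fits within the padded 128-byte record. */
      memcpy(evc, rec, sizeof(*evc) < sizeof(rec) ? sizeof(*evc) : sizeof(rec));

      /* Trust msr_count only if the reported size covers the msrs field. */
      if ( evc->size < offsetof(xen_domctl_ext_vcpucontext_t, msrs) +
                       sizeof(evc->msrs) )
          evc->msr_count = 0;

      if ( evc->msr_count )
      {
          msrs = malloc(evc->msr_count * sizeof(*msrs));
          if ( !msrs || RDEXACT(fd, msrs, evc->msr_count * sizeof(*msrs)) )
          {
              free(msrs);
              return -1;
          }
      }

      *msrs_out = msrs;
      return 0;
  }

The buffer_tail_pv() loop above performs the same size/msr_count validation,
but accumulates everything into one growing tail buffer instead of returning
per-vcpu allocations.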
Attachment: PV-debug-address-mask-MSRs-sr.patch