
Re: [Xen-devel] [PATCH v2 RFC 2/4] x86/PV: support data breakpoint extension registers



On Wed, 2014-03-26 at 14:13 +0000, Jan Beulich wrote:
> Introducing an extension to XEN_DOMCTL_[gs]et_ext_vcpucontext similar
> to the generic MSR save/restore logic recently added for HVM.

"x86: generic MSRs save/restore" I guess?

Is the intention here that the ext vcpu context is a blob of opaque data
from the point of view of the migration tools?

Do the protocol docs in xg_save_restore need updating due to this
change?

How does N->N+1 migration work out?

> This also moves some debug register related declarations/definition to
> the header intended for these.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> v2: libxc adjustment put in place (depending on the separately posted
>     http://lists.xenproject.org/archives/html/xen-devel/2014-03/msg03059.html)
> 
> --- a/tools/libxc/xc_domain_restore.c
> +++ b/tools/libxc/xc_domain_restore.c
> @@ -590,8 +590,13 @@ static int buffer_tail_pv(xc_interface *
>                            uint32_t vcpuextstate_size)
>  {
>      unsigned int i;
> -    size_t pfnlen, vcpulen;
> +    size_t pfnlen, vcpulen, total;
> +    int alloc = 0;
>      struct domain_info_context *dinfo = &ctx->dinfo;
> +    union {
> +        const unsigned char *raw;
> +        const xen_domctl_ext_vcpucontext_t *evc;
> +    } ptr;
>  
>      /* TODO: handle changing pfntab and vcpu counts */
>      /* PFN tab */
> @@ -634,11 +639,36 @@ static int buffer_tail_pv(xc_interface *
>              ERROR("Error allocating VCPU ctxt tail buffer");
>              goto free_pfntab;
>          }
> +        alloc = 1;
>      }
>      // DPRINTF("Reading VCPUS: %d bytes\n", vcpulen);
> -    if ( RDEXACT(fd, buf->vcpubuf, vcpulen) ) {
> -        PERROR("Error when reading ctxt");
> -        goto free_vcpus;
> +    for (total = i = 0, ptr.raw = buf->vcpubuf; ext_vcpucontext; ) {
> +        if ( RDEXACT(fd, buf->vcpubuf + total, vcpulen) ) {
> +            PERROR("Error when reading ctxt");
> +            goto free_vcpus;
> +        }
> +        total += vcpulen;
> +        for (vcpulen = 0; i < buf->vcpucount; ++i) {
> +            size_t msrlen;
> +
> +            if ((const unsigned char *)(ptr.evc + 1) > buf->vcpubuf + total)
> +                break;
> +            msrlen = ptr.evc->msr_count * sizeof(xen_domctl_ext_vcpu_msr_t);
> +            vcpulen += msrlen;
> +            ptr.raw += 128 + msrlen + vcpuextstate_size;
> +        }
> +        if (!vcpulen)
> +            break;
> +        if (alloc) {
> +            void *nbuf = realloc(buf->vcpubuf, total + vcpulen);
> +
> +            if (!nbuf) {
> +                ERROR("Error growing VCPU ctxt tail buffer");
> +                goto free_vcpus;
> +            }
> +            ptr.raw = nbuf + (ptr.raw - buf->vcpubuf);
> +            buf->vcpubuf = nbuf;
> +        }
>      }
>  
>      /* load shared_info_page */
> @@ -1996,6 +2026,8 @@ int xc_domain_restore(xc_interface *xch,
>      vcpup = tailbuf.u.pv.vcpubuf;
>      for ( i = 0; i <= max_vcpu_id; i++ )
>      {
> +        DECLARE_HYPERCALL_BUFFER(xen_domctl_ext_vcpu_msr_t, msrs);
> +
>          if ( !(vcpumap[i/64] & (1ULL << (i%64))) )
>              continue;
>  
> @@ -2130,9 +2162,25 @@ int xc_domain_restore(xc_interface *xch,
>              goto vcpu_ext_state_restore;
>          memcpy(&domctl.u.ext_vcpucontext, vcpup, 128);
>          vcpup += 128;
> +        if ( domctl.u.ext_vcpucontext.msr_count )
> +        {
> +            size_t sz = domctl.u.ext_vcpucontext.msr_count * sizeof(*msrs);
> +
> +            msrs = xc_hypercall_buffer_alloc(xch, msrs, sz);
> +            if ( !msrs )
> +            {
> +                PERROR("No memory for vcpu%d MSRs", i);
> +                goto out;
> +            }
> +            memcpy(msrs, vcpup, sz);

This seems to be open-coding the hypercall bounce buffer machinery to
some extent.
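
For reference, this is roughly what the bounce variant could look like
(a sketch only, assuming the usual DECLARE_HYPERCALL_BOUNCE helpers
from xc_private.h are usable at this point; "m" and "sz" are
hypothetical locals):

    xen_domctl_ext_vcpu_msr_t *m = (xen_domctl_ext_vcpu_msr_t *)vcpup;
    size_t sz = domctl.u.ext_vcpucontext.msr_count * sizeof(*m);
    /* Copy-in only: the hypervisor doesn't write this array back. */
    DECLARE_HYPERCALL_BOUNCE(m, sz, XC_HYPERCALL_BUFFER_BOUNCE_IN);

    if ( xc_hypercall_bounce_pre(xch, m) )
    {
        PERROR("No memory for vcpu%d MSRs", i);
        goto out;
    }
    set_xen_guest_handle(domctl.u.ext_vcpucontext.msrs, m);
    /* ... xc_domctl(xch, &domctl) ... */
    xc_hypercall_bounce_post(xch, m);

which would take care of the allocate/copy/free dance for you.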

> +            vcpup += sz;
> +            set_xen_guest_handle(domctl.u.ext_vcpucontext.msrs, msrs);
> +        }
>          domctl.cmd = XEN_DOMCTL_set_ext_vcpucontext;
>          domctl.domain = dom;
>          frc = xc_domctl(xch, &domctl);
> +        if ( msrs )
> +            xc_hypercall_buffer_free(xch, msrs);

I think you don't need the if ( msrs ) here.

>          if ( frc != 0 )
>          {
>              PERROR("Couldn't set extended vcpu%d info", i);
> --- a/tools/libxc/xc_domain_save.c
> +++ b/tools/libxc/xc_domain_save.c
> @@ -836,6 +836,9 @@ int xc_domain_save(xc_interface *xch, in
>      /* base of the region in which domain memory is mapped */
>      unsigned char *region_base = NULL;
>  
> +    /* MSR extensions to xen_domctl_ext_vcpucontext_t */
> +    DECLARE_HYPERCALL_BUFFER(xen_domctl_ext_vcpu_msr_t, msrs);
> +
>      /* A copy of the CPU eXtended States of the guest. */
>      DECLARE_HYPERCALL_BUFFER(void, buffer);
>  
> @@ -1960,16 +1963,36 @@ int xc_domain_save(xc_interface *xch, in
>          domctl.domain = dom;
>          memset(&domctl.u, 0, sizeof(domctl.u));
>          domctl.u.ext_vcpucontext.vcpu = i;
> -        if ( xc_domctl(xch, &domctl) < 0 )
> +        frc = xc_domctl(xch, &domctl);
> +        if ( frc < 0 && errno == ENOBUFS && domctl.u.ext_vcpucontext.msr_count )
> +        {
> +            msrs = xc_hypercall_buffer_alloc(xch, msrs,
> +                                             domctl.u.ext_vcpucontext.msr_count *
> +                                             sizeof(*msrs));
> +            set_xen_guest_handle(domctl.u.ext_vcpucontext.msrs, msrs);
> +            frc = msrs ? xc_domctl(xch, &domctl) : -1;
> +        }
> +        if ( frc < 0 )
>          {
>              PERROR("No extended context for VCPU%d", i);
>              goto out;
>          }
>          if ( wrexact(io_fd, &domctl.u.ext_vcpucontext, 128) )
>          {
> -            PERROR("Error when writing to state file (2)");
> +            PERROR("Error when writing to state file (ext ctxt)");
>              goto out;
>          }
> +        if ( msrs )
> +        {
> +            if ( wrexact(io_fd, msrs,
> +                         domctl.u.ext_vcpucontext.msr_count * sizeof(*msrs)) )
> +            {
> +                PERROR("Error when writing to state file (MSRs)");
> +                goto out;
> +            }
> +            xc_hypercall_buffer_free(xch, msrs);
> +            msrs = NULL;
> +        }
>  
>          /* Start to fetch CPU eXtended States */
>          /* Get buffer size first */
> @@ -2134,6 +2157,8 @@ int xc_domain_save(xc_interface *xch, in
>  
>      xc_hypercall_buffer_free_pages(xch, to_send, NRPAGES(bitmap_size(dinfo->p2m_size)));
>      xc_hypercall_buffer_free_pages(xch, to_skip, NRPAGES(bitmap_size(dinfo->p2m_size)));
> +    if (msrs)

Coding style: "if ( msrs )" -- if indeed the check is needed at all
(see above).

> +        xc_hypercall_buffer_free(xch, msrs);
>  
>      free(pfn_type);
>      free(pfn_batch);

> @@ -563,6 +563,16 @@ typedef struct xen_domctl_pin_mem_cachea
>  DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
>  
> 
> +#if defined(__i386__) || defined(__x86_64__)
> +struct xen_domctl_ext_vcpu_msr {
> +    uint32_t         index;
> +    uint32_t         reserved;
> +    uint64_aligned_t value;
> +};

Maybe this is just me forgetting x86 stuff, but are these "data
breakpoint extension registers" actually MSRs?

If so, why is this not part of the generic MSR support which you
referenced earlier? At least at the domctl level the interface seems
to be generic here too.

> +typedef struct xen_domctl_ext_vcpu_msr xen_domctl_ext_vcpu_msr_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpu_msr_t);
> +#endif
> +
>  /* XEN_DOMCTL_set_ext_vcpucontext */
>  /* XEN_DOMCTL_get_ext_vcpucontext */
>  struct xen_domctl_ext_vcpucontext {
> @@ -582,6 +592,7 @@ struct xen_domctl_ext_vcpucontext {
>      uint16_t         sysenter_callback_cs;
>      uint8_t          syscall32_disables_events;
>      uint8_t          sysenter_disables_events;
> +    uint16_t         msr_count;
>  #if defined(__GNUC__)
>      union {
>          uint64_aligned_t mcg_cap;
> @@ -590,6 +601,7 @@ struct xen_domctl_ext_vcpucontext {
>  #else
>      struct hvm_vmce_vcpu vmce;
>  #endif
> +    XEN_GUEST_HANDLE_64(xen_domctl_ext_vcpu_msr_t) msrs;
>  #endif
>  };
>  typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;