
Re: [Xen-devel] [PATCH] Fix save/restore for HVM domains with viridian=1



>>> On 25.11.11 at 15:50, Paul Durrant <paul.durrant@xxxxxxxxxx> wrote:
> # HG changeset patch
> # User Paul Durrant <paul.durrant@xxxxxxxxxx>
> # Date 1322232651 0
> # Node ID a3c5e87c73a991861fcdd9a5a1eb8ebc635a2d09
> # Parent  0a0c02a616768bfab16c072788cb76be1893c37f
> Fix save/restore for HVM domains with viridian=1
> 
> xc_domain_save/restore currently pay no attention to HVM_PARAM_VIRIDIAN,
> which results in an HVM domain running a recent version of Windows
> (post-Vista) locking up on a domain restore due to EOIs (done via a
> viridian MSR write) being silently dropped.
> This patch adds an extra save entry for the viridian parameter and also
> adds code in the hypervisor's viridian support to catch attempted use of
> viridian functionality when the HVM parameter has not been set.

Assuming this means the changes to {rd,wr}msr_viridian_regs(): isn't this
going to needlessly spam the log? Printing the message just once (per VM)
would seem to suffice; alternatively, HVM_DBG_LOG() would be a better
choice here.

Jan

> Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> 
> diff -r 0a0c02a61676 -r a3c5e87c73a9 tools/libxc/xc_domain_restore.c
> --- a/tools/libxc/xc_domain_restore.c Mon Nov 21 21:28:34 2011 +0000
> +++ b/tools/libxc/xc_domain_restore.c Fri Nov 25 14:50:51 2011 +0000
> @@ -675,6 +675,7 @@ typedef struct {
>      uint64_t vm86_tss;
>      uint64_t console_pfn;
>      uint64_t acpi_ioport_location;
> +    uint64_t viridian;
>  } pagebuf_t;
>  
>  static int pagebuf_init(pagebuf_t* buf)
> @@ -809,6 +810,16 @@ static int pagebuf_get_one(xc_interface 
>          }
>          return pagebuf_get_one(xch, ctx, buf, fd, dom);
>  
> +    case XC_SAVE_ID_HVM_VIRIDIAN:
> +        /* Skip padding 4 bytes then read the viridian flag. */
> +        if ( RDEXACT(fd, &buf->viridian, sizeof(uint32_t)) ||
> +             RDEXACT(fd, &buf->viridian, sizeof(uint64_t)) )
> +        {
> +            PERROR("error reading the viridian flag");
> +            return -1;
> +        }
> +        return pagebuf_get_one(xch, ctx, buf, fd, dom);
> +
>      default:
>          if ( (count > MAX_BATCH_SIZE) || (count < 0) ) {
>              ERROR("Max batch size exceeded (%d). Giving up.", count);
> @@ -1440,6 +1451,9 @@ int xc_domain_restore(xc_interface *xch,
>              fcntl(io_fd, F_SETFL, orig_io_fd_flags | O_NONBLOCK);
>      }
>  
> +    if (pagebuf.viridian != 0)
> +        xc_set_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN, 1);
> +
>      if (pagebuf.acpi_ioport_location == 1) {
>          DBGPRINTF("Use new firmware ioport from the checkpoint\n");
>          xc_set_hvm_param(xch, dom, HVM_PARAM_ACPI_IOPORTS_LOCATION, 1);
> diff -r 0a0c02a61676 -r a3c5e87c73a9 tools/libxc/xc_domain_save.c
> --- a/tools/libxc/xc_domain_save.c    Mon Nov 21 21:28:34 2011 +0000
> +++ b/tools/libxc/xc_domain_save.c    Fri Nov 25 14:50:51 2011 +0000
> @@ -1506,6 +1506,18 @@ int xc_domain_save(xc_interface *xch, in
>              PERROR("Error when writing the firmware ioport version");
>              goto out;
>          }
> +
> +        chunk.id = XC_SAVE_ID_HVM_VIRIDIAN;
> +        chunk.data = 0;
> +        xc_get_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN,
> +                         (unsigned long *)&chunk.data);
> +
> +        if ( (chunk.data != 0) &&
> +             wrexact(io_fd, &chunk, sizeof(chunk)) )
> +        {
> +            PERROR("Error when writing the viridian flag");
> +            goto out;
> +        }
>      }
>  
>      if ( !callbacks->checkpoint )
> diff -r 0a0c02a61676 -r a3c5e87c73a9 tools/libxc/xg_save_restore.h
> --- a/tools/libxc/xg_save_restore.h   Mon Nov 21 21:28:34 2011 +0000
> +++ b/tools/libxc/xg_save_restore.h   Fri Nov 25 14:50:51 2011 +0000
> @@ -134,6 +134,7 @@
>  #define XC_SAVE_ID_HVM_CONSOLE_PFN    -8 /* (HVM-only) */
>  #define XC_SAVE_ID_LAST_CHECKPOINT    -9 /* Commit to restoring after completion of current iteration. */
>  #define XC_SAVE_ID_HVM_ACPI_IOPORTS_LOCATION -10
> +#define XC_SAVE_ID_HVM_VIRIDIAN       -11
>  
>  /*
>  ** We process save/restore/migrate in batches of pages; the below
> diff -r 0a0c02a61676 -r a3c5e87c73a9 xen/arch/x86/hvm/viridian.c
> --- a/xen/arch/x86/hvm/viridian.c     Mon Nov 21 21:28:34 2011 +0000
> +++ b/xen/arch/x86/hvm/viridian.c     Fri Nov 25 14:50:51 2011 +0000
> @@ -206,8 +206,11 @@ int wrmsr_viridian_regs(uint32_t idx, ui
>      struct vcpu *v = current;
>      struct domain *d = v->domain;
>  
> -    if ( !is_viridian_domain(d) )
> +    if ( !is_viridian_domain(d) ) {
> +        gdprintk(XENLOG_WARNING, "%s: %d not a viridian domain\n", __func__,
> +                 d->domain_id);
>          return 0;
> +    }
>  
>      switch ( idx )
>      {
> @@ -271,8 +274,11 @@ int rdmsr_viridian_regs(uint32_t idx, ui
>      struct vcpu *v = current;
>      struct domain *d = v->domain;
>      
> -    if ( !is_viridian_domain(d) )
> +    if ( !is_viridian_domain(d) ) {
> +        gdprintk(XENLOG_WARNING, "%s: %d not a viridian domain\n", __func__,
> +                 d->domain_id);
>          return 0;
> +    }
>  
>      switch ( idx )
>      {
> @@ -411,6 +417,8 @@ static int viridian_load_domain_ctxt(str
>      if ( hvm_load_entry(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
>          return -EINVAL;
>  
> +    ASSERT(is_viridian_domain(d));
> +
>      d->arch.hvm_domain.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa;
>      d->arch.hvm_domain.viridian.guest_os_id.raw   = ctxt.guest_os_id;
>  
> @@ -455,6 +463,8 @@ static int viridian_load_vcpu_ctxt(struc
>      if ( hvm_load_entry(VIRIDIAN_VCPU, h, &ctxt) != 0 )
>          return -EINVAL;
>  
> +    ASSERT(is_viridian_domain(d));
> +
>      v->arch.hvm_vcpu.viridian.apic_assist.raw = ctxt.apic_assist;
>  
>      return 0;
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxxxxxxxx 
> http://lists.xensource.com/xen-devel 





 

