
Re: [Xen-devel] [PATCH 1/18 V2]: PVH xen: turn gdt_frames/gdt_ents into union



On Fri, Mar 15, 2013 at 05:14:50PM -0700, Mukesh Rathor wrote:
> PVH only needs gdtaddr and gdtsz, so a union is created. There is no
> functional code change in this patch.

Did you copy-n-paste this patch in? It is all malformed.

git send-email works great for sending patches. You can just do:

git format-patch 6524455339349779c553af949b81d3d46f051797..
[generates the patches]

git send-email --to xen-devel@xxxxxxxxxxxxx --to <other people> \
  --annotate --compose --subject "[PATCH v2] PVH patches." *.patch

and in your .gitconfig have:

[sendemail]
    smtpserver = stbeehive.oracle.com
    smtpserverport = 465
    smtpencryption = tls
    smtpuser = mukesh.rathor@xxxxxxxxxx

and it will just prompt you for your password.

> 
> Changes in V2: 
>    - Add __XEN_INTERFACE_VERSION__
> 
> Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
> ---
>  tools/libxc/xc_domain_restore.c   |    8 ++++----
>  tools/libxc/xc_domain_save.c      |    6 +++---
>  xen/arch/x86/domain.c             |   12 ++++++------
>  xen/arch/x86/domctl.c             |   12 ++++++------
>  xen/include/public/arch-x86/xen.h |   13 +++++++++++++
>  5 files changed, 32 insertions(+), 19 deletions(-)
> 
> diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c
> index a15f86a..9a22e2a 100644
> --- a/tools/libxc/xc_domain_restore.c
> +++ b/tools/libxc/xc_domain_restore.c
> @@ -2020,15 +2020,15 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
>              munmap(start_info, PAGE_SIZE);
>          }
>          /* Uncanonicalise each GDT frame number. */
> -        if ( GET_FIELD(ctxt, gdt_ents) > 8192 )
> +        if ( GET_FIELD(ctxt, u.pv.gdt_ents) > 8192 )
>          {
>              ERROR("GDT entry count out of range");
>              goto out;
>          }
>  
> -        for ( j = 0; (512*j) < GET_FIELD(ctxt, gdt_ents); j++ )
> +        for ( j = 0; (512*j) < GET_FIELD(ctxt, u.pv.gdt_ents); j++ )
>          {
> -            pfn = GET_FIELD(ctxt, gdt_frames[j]);
> +            pfn = GET_FIELD(ctxt, u.pv.gdt_frames[j]);
>              if ( (pfn >= dinfo->p2m_size) ||
>                   (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
>              {
> @@ -2036,7 +2036,7 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
>                        j, (unsigned long)pfn);
>                  goto out;
>              }
> -            SET_FIELD(ctxt, gdt_frames[j], ctx->p2m[pfn]);
> +            SET_FIELD(ctxt, u.pv.gdt_frames[j], ctx->p2m[pfn]);
>          }
>          /* Uncanonicalise the page table base pointer. */
>          pfn = UNFOLD_CR3(GET_FIELD(ctxt, ctrlreg[3]));
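
As an aside: the 8192 bound and the 512*j stride above both fall out of
the 8-byte x86 descriptor size - one 4K frame holds 4096/8 = 512
entries, so gdt_frames[16] caps gdt_ents at 16*512 = 8192. A tiny
self-contained sketch of the arithmetic (the macro names are mine, not
Xen's):

#include <assert.h>

#define GDT_FRAME_COUNT   16    /* matches gdt_frames[16]       */
#define DESCRIPTOR_BYTES  8     /* size of one x86 GDT entry    */
#define FRAME_BYTES       4096  /* one machine frame (page)     */
#define ENTRIES_PER_FRAME (FRAME_BYTES / DESCRIPTOR_BYTES)

int main(void)
{
    /* One frame holds 4096/8 = 512 descriptors... */
    assert(ENTRIES_PER_FRAME == 512);
    /* ...so 16 frames cap gdt_ents at 8192, the bound checked above. */
    assert(GDT_FRAME_COUNT * ENTRIES_PER_FRAME == 8192);
    return 0;
}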
> diff --git a/tools/libxc/xc_domain_save.c b/tools/libxc/xc_domain_save.c
> index ff76626..4ec5e7e 100644
> --- a/tools/libxc/xc_domain_save.c
> +++ b/tools/libxc/xc_domain_save.c
> @@ -1900,15 +1900,15 @@ int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom, uint32_t max_iter
>          }
>  
>          /* Canonicalise each GDT frame number. */
> -        for ( j = 0; (512*j) < GET_FIELD(&ctxt, gdt_ents); j++ )
> +        for ( j = 0; (512*j) < GET_FIELD(&ctxt, u.pv.gdt_ents); j++ )
>          {
> -            mfn = GET_FIELD(&ctxt, gdt_frames[j]);
> +            mfn = GET_FIELD(&ctxt, u.pv.gdt_frames[j]);
>              if ( !MFN_IS_IN_PSEUDOPHYS_MAP(mfn) )
>              {
>                  ERROR("GDT frame is not in range of pseudophys map");
>                  goto out;
>              }
> -            SET_FIELD(&ctxt, gdt_frames[j], mfn_to_pfn(mfn));
> +            SET_FIELD(&ctxt, u.pv.gdt_frames[j], mfn_to_pfn(mfn));
>          }
>  
>          /* Canonicalise the page table base pointer. */
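
For anyone reading along: every access in these hunks goes through
GET_FIELD/SET_FIELD because the saved context may use the 32- or 64-bit
layout independently of how the tools were built. A self-contained
sketch of the pattern I believe those libxc macros follow (the real
ones live in xg_save_restore.h; the struct and function below are
simplified stand-ins of mine, not the actual code):

#include <stdint.h>

/* Simplified stand-in for the dual-layout guest context. */
struct ctxt_any {
    int guest_width;                   /* bytes per guest long: 4 or 8 */
    struct { uint32_t gdt_ents; } x32; /* 32-bit guest layout          */
    struct { uint64_t gdt_ents; } x64; /* 64-bit guest layout          */
};

/* Pick the field from whichever layout the saved guest used. */
static uint64_t get_gdt_ents(const struct ctxt_any *p)
{
    return (p->guest_width == 8) ? p->x64.gdt_ents : p->x32.gdt_ents;
}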
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index 8d30d08..ea1381c 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -780,8 +780,8 @@ int arch_set_info_guest(
>          }
>  
>          for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames); ++i )
> -            fail |= v->arch.pv_vcpu.gdt_frames[i] != c(gdt_frames[i]);
> -        fail |= v->arch.pv_vcpu.gdt_ents != c(gdt_ents);
> +            fail |= v->arch.pv_vcpu.gdt_frames[i] != c(u.pv.gdt_frames[i]);
> +        fail |= v->arch.pv_vcpu.gdt_ents != c(u.pv.gdt_ents);
>  
>          fail |= v->arch.pv_vcpu.ldt_base != c(ldt_base);
>          fail |= v->arch.pv_vcpu.ldt_ents != c(ldt_ents);
> @@ -830,17 +830,17 @@ int arch_set_info_guest(
>          d->vm_assist = c(vm_assist);
>  
>      if ( !compat )
> -        rc = (int)set_gdt(v, c.nat->gdt_frames, c.nat->gdt_ents);
> +        rc = (int)set_gdt(v, c.nat->u.pv.gdt_frames, c.nat->u.pv.gdt_ents);
>      else
>      {
>          unsigned long gdt_frames[ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames)];
> -        unsigned int n = (c.cmp->gdt_ents + 511) / 512;
> +        unsigned int n = (c.cmp->u.pv.gdt_ents + 511) / 512;
>  
>          if ( n > ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames) )
>              return -EINVAL;
>          for ( i = 0; i < n; ++i )
> -            gdt_frames[i] = c.cmp->gdt_frames[i];
> -        rc = (int)set_gdt(v, gdt_frames, c.cmp->gdt_ents);
> +            gdt_frames[i] = c.cmp->u.pv.gdt_frames[i];
> +        rc = (int)set_gdt(v, gdt_frames, c.cmp->u.pv.gdt_ents);
>      }
>      if ( rc != 0 )
>          return rc;
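
No change needed here, just spelling out the compat path: each 32-bit
frame number is widened individually, and the entry count is turned
into a frame count with a ceiling division, since a partially filled
last frame still occupies a whole page. In sketch form (frames_for()
is my name, purely illustrative, not a Xen function):

/* Illustrative only: ceil(gdt_ents / 512) without floating point. */
static inline unsigned int frames_for(unsigned int gdt_ents)
{
    return (gdt_ents + 511) / 512;
}
/* e.g. frames_for(1) == 1, frames_for(512) == 1, frames_for(513) == 2 */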
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index a196e2a..31937e0 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -1305,12 +1305,12 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
>          c(ldt_base = v->arch.pv_vcpu.ldt_base);
>          c(ldt_ents = v->arch.pv_vcpu.ldt_ents);
>          for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames); ++i )
> -            c(gdt_frames[i] = v->arch.pv_vcpu.gdt_frames[i]);
> -        BUILD_BUG_ON(ARRAY_SIZE(c.nat->gdt_frames) !=
> -                     ARRAY_SIZE(c.cmp->gdt_frames));
> -        for ( ; i < ARRAY_SIZE(c.nat->gdt_frames); ++i )
> -            c(gdt_frames[i] = 0);
> -        c(gdt_ents = v->arch.pv_vcpu.gdt_ents);
> +            c(u.pv.gdt_frames[i] = v->arch.pv_vcpu.gdt_frames[i]);
> +        BUILD_BUG_ON(ARRAY_SIZE(c.nat->u.pv.gdt_frames) !=
> +                     ARRAY_SIZE(c.cmp->u.pv.gdt_frames));
> +        for ( ; i < ARRAY_SIZE(c.nat->u.pv.gdt_frames); ++i )
> +            c(u.pv.gdt_frames[i] = 0);
> +        c(u.pv.gdt_ents = v->arch.pv_vcpu.gdt_ents);
>          c(kernel_ss = v->arch.pv_vcpu.kernel_ss);
>          c(kernel_sp = v->arch.pv_vcpu.kernel_sp);
>          for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.ctrlreg); ++i )
> diff --git a/xen/include/public/arch-x86/xen.h b/xen/include/public/arch-x86/xen.h
> index b7f6a51..ea72532 100644
> --- a/xen/include/public/arch-x86/xen.h
> +++ b/xen/include/public/arch-x86/xen.h
> @@ -170,7 +170,20 @@ struct vcpu_guest_context {
>      struct cpu_user_regs user_regs;         /* User-level CPU registers     */
>      struct trap_info trap_ctxt[256];        /* Virtual IDT                  */
>      unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
> +#if __XEN_INTERFACE_VERSION__ < 0x00040300
>      unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
> +#else
> +    union {
> +        struct {
> +            /* GDT (machine frames, # ents) */
> +            unsigned long gdt_frames[16], gdt_ents;
> +        } pv;
> +        struct {
> +            /* PVH: GDTR addr and size */
> +            unsigned long gdtaddr, gdtsz;
> +        } pvh;
> +    } u;
> +#endif
>      unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
>      /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
>      unsigned long ctrlreg[8];               /* CR0-CR7 (control registers)  */
> -- 
> 1.7.2.3
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel