[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 8/18 V2]: PVH xen: domain creation code changes



> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 2fa2ea5..31aa04f 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -190,6 +190,11 @@ struct hvm_function_table {
>                                  paddr_t *L1_gpa, unsigned int *page_order,
>                                  uint8_t *p2m_acc, bool_t access_r,
>                                  bool_t access_w, bool_t access_x);
> +    /* PVH functions */
> +    int (*pvh_set_vcpu_info)(struct vcpu *v, struct vcpu_guest_context 
> *ctxtp);
> +    int (*pvh_read_descriptor)(unsigned int sel, const struct vcpu *v,
> +                         const struct cpu_user_regs *regs, unsigned long 
> *base,
> +                         unsigned long *limit, unsigned int *ar);

Ewww... Please remove the 'pvh_' prefix from these hook names, and add a comment saying:
        /* These two functions are used only in PVH mode. */

>  };
>  
>  extern struct hvm_function_table hvm_funcs;
> @@ -323,6 +328,19 @@ static inline unsigned long 
> hvm_get_shadow_gs_base(struct vcpu *v)
>      return hvm_funcs.get_shadow_gs_base(v);
>  }
>  
> +static inline int hvm_pvh_set_vcpu_info(struct vcpu *v, 
> +                                        struct vcpu_guest_context *ctxtp)
> +{
> +    return hvm_funcs.pvh_set_vcpu_info(v, ctxtp);
> +}
> +
> +static inline int hvm_pvh_read_descriptor(unsigned int sel, 
> +               const struct vcpu *v, const struct cpu_user_regs *regs, 
> +               unsigned long *base, unsigned long *limit, unsigned int *ar)
> +{
> +    return hvm_funcs.pvh_read_descriptor(sel, v, regs, base, limit, ar);
> +}
> +
>  #define is_viridian_domain(_d)                                             \
>   (is_hvm_domain(_d) && ((_d)->arch.hvm_domain.params[HVM_PARAM_VIRIDIAN]))
>  
> diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
> index e8b8cd7..2725a62 100644
> --- a/xen/include/asm-x86/hvm/vcpu.h
> +++ b/xen/include/asm-x86/hvm/vcpu.h
> @@ -104,6 +104,13 @@ struct nestedvcpu {
>  
>  #define vcpu_nestedhvm(v) ((v)->arch.hvm_vcpu.nvcpu)
>  
> +/* add any PVH specific fields here */
> +struct pvh_hvm_vcpu_ext
> +{
> +    /* Guest-specified relocation of vcpu_info. */
> +    unsigned long vcpu_info_mfn;
> +};
> +
>  struct hvm_vcpu {
>      /* Guest control-register and EFER values, just as the guest sees them. 
> */
>      unsigned long       guest_cr[5];
> @@ -170,6 +177,8 @@ struct hvm_vcpu {
>      struct hvm_trap     inject_trap;
>  
>      struct viridian_vcpu viridian;
> +
> +    struct pvh_hvm_vcpu_ext hvm_pvh;

Could you remove the two 'hvm' parts from the type and field names, so that it reads:
        struct pvh_vcpu_ext pvh;

?
>  };
>  
>  #endif /* __ASM_X86_HVM_VCPU_H__ */
> diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
> index d8dc6f2..5681806 100644
> --- a/xen/include/asm-x86/system.h
> +++ b/xen/include/asm-x86/system.h
> @@ -4,9 +4,15 @@
>  #include <xen/lib.h>
>  #include <asm/bitops.h>
>  
> +/* We need vcpu because during context switch, going from pure PV to PVH,
> + * in save_segments(), current has been updated to next, and no longer 
> pointing
> + * to the pure PV. Note: for PVH, we update regs->selectors on each vmexit */
>  #define read_segment_register(vcpu, regs, name)                 \
>  ({  u16 __sel;                                                  \
> -    asm volatile ( "movw %%" STR(name) ",%0" : "=r" (__sel) );  \
> +    if (is_pvh_vcpu(vcpu))                                         \
> +        __sel = regs->name;                                     \
> +    else                                                         \
> +        asm volatile ( "movw %%" STR(name) ",%0" : "=r" (__sel) );  \
>      __sel;                                                      \
>  })
>  
> -- 
> 1.7.2.3
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> http://lists.xen.org/xen-devel
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.