
Re: [Xen-devel] [PATCH 05/18] PVH xen: hvm/vmcs related preparatory changes for PVH



>>> On 25.06.13 at 02:01, Mukesh Rathor <mukesh.rathor@xxxxxxxxxx> wrote:
> In this patch, some common code is factored out to create
> vmx_set_common_host_vmcs_fields() to be used by PVH. Also, some changes
> in hvm.c as hvm_domain.params is not set for PVH.
> 
> Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
> ---
>  xen/arch/x86/hvm/hvm.c      |   10 ++++---
>  xen/arch/x86/hvm/vmx/vmcs.c |   58 +++++++++++++++++++++++-------------------

The changes to the two files don't appear to be connected to one
another in any way - please split this into an HVM patch and a VMX
patch, so each is also easily identifiable via its patch title.

As patch 4 was already VMX-specific, perhaps the HVM one
should come before the vendor-specific ones?

Jan
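
For illustration only (not part of the patch or of this reply): the point
of deferring the dm_domid read appears to be that a later PVH patch can
bypass the ioreq/device-model setup without ever touching
hvm_domain.params, roughly along the lines below. The is_pvh_vcpu()
predicate is an assumption about later patches in the series, not
something defined by this patch.

    /* Sketch only -- not from the patch under review.  Assumes a later
     * patch provides an is_pvh_vcpu() check; PVH guests have no device
     * model, so hvm_domain.params[] is never populated for them. */
    if ( !is_pvh_vcpu(v) )
    {
        /* Only plain HVM guests have a device model domain to look up. */
        dm_domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];

        /* Create ioreq event channel. */
        rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL);
    }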

> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 43b6d05..118e21a 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -1070,10 +1070,13 @@ int hvm_vcpu_initialise(struct vcpu *v)
>  {
>      int rc;
>      struct domain *d = v->domain;
> -    domid_t dm_domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
> +    domid_t dm_domid;
>  
>      hvm_asid_flush_vcpu(v);
>  
> +    spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
> +    INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
> +
>      if ( (rc = vlapic_init(v)) != 0 )
>          goto fail1;
>  
> @@ -1084,6 +1087,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
>           && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) 
>          goto fail3;
>  
> +    dm_domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
> +
>      /* Create ioreq event channel. */
>      rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL);
>      if ( rc < 0 )
> @@ -1106,9 +1111,6 @@ int hvm_vcpu_initialise(struct vcpu *v)
>          get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
>      spin_unlock(&d->arch.hvm_domain.ioreq.lock);
>  
> -    spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
> -    INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
> -
>      v->arch.hvm_vcpu.inject_trap.vector = -1;
>  
>      rc = setup_compat_arg_xlat(v);
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index ef0ee7f..43539a6 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -825,11 +825,40 @@ void virtual_vmcs_vmwrite(void *vvmcs, u32 vmcs_encoding, u64 val)
>      virtual_vmcs_exit(vvmcs);
>  }
>  
> -static int construct_vmcs(struct vcpu *v)
> +static void vmx_set_common_host_vmcs_fields(struct vcpu *v)
>  {
> -    struct domain *d = v->domain;
>      uint16_t sysenter_cs;
>      unsigned long sysenter_eip;
> +
> +    /* Host data selectors. */
> +    __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
> +    __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
> +    __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
> +    __vmwrite(HOST_FS_SELECTOR, 0);
> +    __vmwrite(HOST_GS_SELECTOR, 0);
> +    __vmwrite(HOST_FS_BASE, 0);
> +    __vmwrite(HOST_GS_BASE, 0);
> +
> +    /* Host control registers. */
> +    v->arch.hvm_vmx.host_cr0 = read_cr0() | X86_CR0_TS;
> +    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
> +    __vmwrite(HOST_CR4,
> +              mmu_cr4_features | (xsave_enabled(v) ? X86_CR4_OSXSAVE : 0));
> +
> +    /* Host CS:RIP. */
> +    __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
> +    __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);
> +
> +    /* Host SYSENTER CS:RIP. */
> +    rdmsrl(MSR_IA32_SYSENTER_CS, sysenter_cs);
> +    __vmwrite(HOST_SYSENTER_CS, sysenter_cs);
> +    rdmsrl(MSR_IA32_SYSENTER_EIP, sysenter_eip);
> +    __vmwrite(HOST_SYSENTER_EIP, sysenter_eip);
> +}
> +
> +static int construct_vmcs(struct vcpu *v)
> +{
> +    struct domain *d = v->domain;
>      u32 vmexit_ctl = vmx_vmexit_control;
>      u32 vmentry_ctl = vmx_vmentry_control;
>  
> @@ -932,30 +961,7 @@ static int construct_vmcs(struct vcpu *v)
>          __vmwrite(POSTED_INTR_NOTIFICATION_VECTOR, posted_intr_vector);
>      }
>  
> -    /* Host data selectors. */
> -    __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
> -    __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
> -    __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
> -    __vmwrite(HOST_FS_SELECTOR, 0);
> -    __vmwrite(HOST_GS_SELECTOR, 0);
> -    __vmwrite(HOST_FS_BASE, 0);
> -    __vmwrite(HOST_GS_BASE, 0);
> -
> -    /* Host control registers. */
> -    v->arch.hvm_vmx.host_cr0 = read_cr0() | X86_CR0_TS;
> -    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
> -    __vmwrite(HOST_CR4,
> -              mmu_cr4_features | (xsave_enabled(v) ? X86_CR4_OSXSAVE : 0));
> -
> -    /* Host CS:RIP. */
> -    __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
> -    __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);
> -
> -    /* Host SYSENTER CS:RIP. */
> -    rdmsrl(MSR_IA32_SYSENTER_CS, sysenter_cs);
> -    __vmwrite(HOST_SYSENTER_CS, sysenter_cs);
> -    rdmsrl(MSR_IA32_SYSENTER_EIP, sysenter_eip);
> -    __vmwrite(HOST_SYSENTER_EIP, sysenter_eip);
> +    vmx_set_common_host_vmcs_fields(v);
>  
>      /* MSR intercepts. */
>      __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
> -- 
> 1.7.2.3
> 
> 


