[Xen-devel] [PATCH v5 1/4] nested vmx: Use a list to store the launched vvmcs for L1 VMM

Originally we used a virtual VMCS field to store the launch state of a
given vmcs. However, once the VMCS shadowing feature is introduced, this
virtual VMCS must also be loadable into real hardware, and VMREAD/VMWRITE
would then operate on invalid fields. The new approach is to store the
launch state in a list kept for the L1 VMM.

Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vvmx.c        |   96 ++++++++++++++++++++++++++++++++----
 xen/include/asm-x86/hvm/vmx/vmcs.h |    2 -
 xen/include/asm-x86/hvm/vmx/vvmx.h |    6 ++
 3 files changed, 92 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index d4e9b02..a0e49c4 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -51,6 +51,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     nvmx->iobitmap[0] = NULL;
     nvmx->iobitmap[1] = NULL;
     nvmx->msrbitmap = NULL;
+    INIT_LIST_HEAD(&nvmx->launched_list);
     return 0;
 out:
     return -ENOMEM;
@@ -58,7 +59,9 @@ out:
 
 void nvmx_vcpu_destroy(struct vcpu *v)
 {
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    struct vvmcs_list *item, *n;
 
     /*
      * When destroying the vcpu, it may be running on behalf of L2 guest.
@@ -74,6 +77,12 @@ void nvmx_vcpu_destroy(struct vcpu *v)
         free_xenheap_page(nvcpu->nv_n2vmcx);
         nvcpu->nv_n2vmcx = NULL;
     }
+
+    list_for_each_entry_safe(item, n, &nvmx->launched_list, node)
+    {
+        list_del(&item->node);
+        xfree(item);
+    }
 }
 
 void nvmx_domain_relinquish_resources(struct domain *d)
@@ -1198,6 +1207,62 @@ int nvmx_handle_vmxoff(struct cpu_user_regs *regs)
     return X86EMUL_OKAY;
 }
 
+static bool_t vvmcs_launched(struct list_head *launched_list,
+                             unsigned long vvmcs_mfn)
+{
+    struct vvmcs_list *vvmcs;
+    struct list_head *pos;
+    bool_t launched = 0;
+
+    list_for_each(pos, launched_list)
+    {
+        vvmcs = list_entry(pos, struct vvmcs_list, node);
+        if ( vvmcs_mfn == vvmcs->vvmcs_mfn )
+        {
+            launched = 1;
+            break;
+        }
+    }
+
+    return launched;
+}
+
+static int set_vvmcs_launched(struct list_head *launched_list,
+                              unsigned long vvmcs_mfn)
+{
+    struct vvmcs_list *vvmcs;
+
+    if ( vvmcs_launched(launched_list, vvmcs_mfn) )
+        return 0;
+
+    vvmcs = xzalloc(struct vvmcs_list);
+    if ( !vvmcs )
+        return -ENOMEM;
+
+    vvmcs->vvmcs_mfn = vvmcs_mfn;
+    list_add(&vvmcs->node, launched_list);
+
+    return 0;
+}
+
+static void clear_vvmcs_launched(struct list_head *launched_list,
+                                 paddr_t vvmcs_mfn)
+{
+    struct vvmcs_list *vvmcs;
+    struct list_head *pos;
+
+    list_for_each(pos, launched_list)
+    {
+        vvmcs = list_entry(pos, struct vvmcs_list, node);
+        if ( vvmcs_mfn == vvmcs->vvmcs_mfn )
+        {
+            list_del(&vvmcs->node);
+            xfree(vvmcs);
+            break;
+        }
+    }
+}
+
 int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
@@ -1221,8 +1286,10 @@ int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
 
 int nvmx_handle_vmresume(struct cpu_user_regs *regs)
 {
-    int launched;
+    bool_t launched;
     struct vcpu *v = current;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
 
     if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
     {
@@ -1230,8 +1297,8 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;
     }
 
-    launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
-                           NVMX_LAUNCH_STATE);
+    launched = vvmcs_launched(&nvmx->launched_list,
+                   domain_page_map_to_mfn(nvcpu->nv_vvmcx));
     if ( !launched ) {
        vmreturn (regs, VMFAIL_VALID);
        return X86EMUL_OKAY;
@@ -1241,9 +1308,11 @@
 
 int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
 {
-    int launched;
+    bool_t launched;
     int rc;
     struct vcpu *v = current;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
 
     if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
     {
@@ -1251,8 +1320,8 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;
     }
 
-    launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
-                           NVMX_LAUNCH_STATE);
+    launched = vvmcs_launched(&nvmx->launched_list,
+                   domain_page_map_to_mfn(nvcpu->nv_vvmcx));
     if ( launched ) {
       vmreturn (regs, VMFAIL_VALID);
       return X86EMUL_OKAY;
@@ -1260,8 +1329,12 @@
     else {
         rc = nvmx_vmresume(v,regs);
         if ( rc == X86EMUL_OKAY )
-            __set_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
-                NVMX_LAUNCH_STATE, 1);
+        {
+            if ( set_vvmcs_launched(&nvmx->launched_list,
+                    domain_page_map_to_mfn(nvcpu->nv_vvmcx)) < 0 )
+                return X86EMUL_UNHANDLEABLE;
+        }
+
     }
     return rc;
 }
@@ -1328,6 +1401,7 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
     struct vcpu *v = current;
     struct vmx_inst_decoded decode;
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa = 0;
     void *vvmcs;
     int rc;
@@ -1344,7 +1418,8 @@
 
     if ( gpa == nvcpu->nv_vvmcxaddr )
     {
-        __set_vvmcs(nvcpu->nv_vvmcx, NVMX_LAUNCH_STATE, 0);
+        clear_vvmcs_launched(&nvmx->launched_list,
+            domain_page_map_to_mfn(nvcpu->nv_vvmcx));
         nvmx_purge_vvmcs(v);
     }
     else
@@ -1352,7 +1427,8 @@
         /* Even if this VMCS isn't the current one, we must clear it. */
         vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT);
         if ( vvmcs )
-            __set_vvmcs(vvmcs, NVMX_LAUNCH_STATE, 0);
+            clear_vvmcs_launched(&nvmx->launched_list,
+                domain_page_map_to_mfn(vvmcs));
         hvm_unmap_guest_frame(vvmcs);
     }
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 51df81e..9ff741f 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -421,8 +421,6 @@ enum vmcs_field {
     HOST_SYSENTER_EIP               = 0x00006c12,
     HOST_RSP                        = 0x00006c14,
     HOST_RIP                        = 0x00006c16,
-    /* A virtual VMCS field used for nestedvmx only */
-    NVMX_LAUNCH_STATE               = 0x00006c20,
 };
 
 #define VMCS_VPID_WIDTH 16
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index 9e1dc77..89e839f 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -23,6 +23,11 @@
 #ifndef __ASM_X86_HVM_VVMX_H__
 #define __ASM_X86_HVM_VVMX_H__
 
+struct vvmcs_list {
+    unsigned long vvmcs_mfn;
+    struct list_head node;
+};
+
 struct nestedvmx {
     paddr_t vmxon_region_pa;
     void *iobitmap[2];          /* map (va) of L1 guest I/O bitmap */
@@ -38,6 +43,7 @@ struct nestedvmx {
         uint32_t exit_qual;
     } ept;
     uint32_t guest_vpid;
+    struct list_head launched_list;
 };
 
 #define vcpu_2_nvmx(v) (vcpu_nestedhvm(v).u.nvmx)
-- 
1.7.1
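
[Editor's note] For readers less familiar with Xen's list and allocation primitives, below is a minimal standalone sketch of the bookkeeping this patch introduces: a per-L1 list of "launched" vVMCS frame numbers that is consulted on VMLAUNCH/VMRESUME and pruned on VMCLEAR. It is only an illustration written against the C standard library (malloc/free and a hand-rolled singly linked list) rather than Xen's list_head, xzalloc/xfree and bool_t, so all names and types in it are assumed for the example, not the hypervisor API.

/*
 * Standalone sketch of launched-vVMCS bookkeeping. Mirrors the shape of
 * vvmcs_launched()/set_vvmcs_launched()/clear_vvmcs_launched() from the
 * patch, but uses plain C instead of Xen primitives.
 */
#include <stdio.h>
#include <stdlib.h>

struct vvmcs_node {
    unsigned long vvmcs_mfn;       /* frame number identifying the vVMCS */
    struct vvmcs_node *next;
};

/* Return 1 if the given vVMCS frame is already on the launched list. */
static int vvmcs_launched(struct vvmcs_node *head, unsigned long mfn)
{
    for ( ; head; head = head->next )
        if ( head->vvmcs_mfn == mfn )
            return 1;
    return 0;
}

/* Mark a vVMCS as launched; 0 on success, -1 on allocation failure. */
static int set_vvmcs_launched(struct vvmcs_node **head, unsigned long mfn)
{
    struct vvmcs_node *n;

    if ( vvmcs_launched(*head, mfn) )  /* already recorded, nothing to do */
        return 0;

    n = malloc(sizeof(*n));
    if ( !n )
        return -1;

    n->vvmcs_mfn = mfn;
    n->next = *head;                   /* push onto the front of the list */
    *head = n;
    return 0;
}

/* Drop a vVMCS from the launched list, e.g. on VMCLEAR. */
static void clear_vvmcs_launched(struct vvmcs_node **head, unsigned long mfn)
{
    struct vvmcs_node **pp;

    for ( pp = head; *pp; pp = &(*pp)->next )
        if ( (*pp)->vvmcs_mfn == mfn )
        {
            struct vvmcs_node *victim = *pp;
            *pp = victim->next;        /* unlink, then free the node */
            free(victim);
            return;
        }
}

int main(void)
{
    struct vvmcs_node *launched = NULL;

    set_vvmcs_launched(&launched, 0x1234);                /* VMLAUNCH path */
    printf("0x1234: %d\n", vvmcs_launched(launched, 0x1234));   /* 1 */
    printf("0x5678: %d\n", vvmcs_launched(launched, 0x5678));   /* 0 */
    clear_vvmcs_launched(&launched, 0x1234);              /* VMCLEAR path */
    printf("0x1234: %d\n", vvmcs_launched(launched, 0x1234));   /* 0 */
    return 0;
}

In the patch itself the key is the machine frame number returned by domain_page_map_to_mfn() for the mapped vVMCS, so the same guest VMCS page is recognised regardless of where it is currently mapped.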