[Xen-devel] [PATCH v4 3/4] nested vmx: optimize for bulk access of virtual VMCS
After we use VMREAD/VMWRITE to build up the virtual VMCS, each access to the virtual VMCS needs two VMPTRLDs and one VMCLEAR to switch the environment, which can be a performance overhead. This commit handles multiple virtual VMCS accesses together, so that a whole batch of fields pays the switching cost only once (a cost-model sketch follows the patch).

Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vvmx.c    |  103 +++++++++++++++++++++++++++++++++------
 xen/include/asm-x86/hvm/vcpu.h |    2 +
 2 files changed, 89 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 3221dd2..e401b63 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -30,6 +30,7 @@
 
 static void nvmx_purge_vvmcs(struct vcpu *v);
 
+#define VMCS_BUF_SIZE 500
 int nvmx_vcpu_initialise(struct vcpu *v)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
@@ -41,6 +42,12 @@ int nvmx_vcpu_initialise(struct vcpu *v)
         gdprintk(XENLOG_ERR, "nest: allocation for shadow vmcs failed\n");
         goto out;
     }
+
+    nvcpu->vvmcx_buf = xzalloc_array(u64, VMCS_BUF_SIZE);
+    /* If vmcs buffer allocation failed, we will fall back to default mode. */
+    if ( !nvcpu->vvmcx_buf )
+        gdprintk(XENLOG_WARNING, "nest: allocating exchanging buffer failed\n");
+
     nvmx->ept.enabled = 0;
     nvmx->guest_vpid = 0;
     nvmx->vmxon_region_pa = 0;
@@ -83,6 +90,9 @@ void nvmx_vcpu_destroy(struct vcpu *v)
         list_del(&item->node);
         xfree(item);
     }
+
+    if ( nvcpu->vvmcx_buf )
+        xfree(nvcpu->vvmcx_buf);
 }
 
 void nvmx_domain_relinquish_resources(struct domain *d)
@@ -830,6 +840,35 @@ static void vvmcs_to_shadow(void *vvmcs, unsigned int field)
     __vmwrite(field, value);
 }
 
+static void vvmcs_to_shadow_bulk(struct vcpu *v, unsigned int n,
+                                 const u16 *field)
+{
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    void *vvmcs = nvcpu->nv_vvmcx;
+    u64 *value = nvcpu->vvmcx_buf;
+    unsigned int i;
+
+    if ( !cpu_has_vmx_vmcs_shadowing )
+        goto fallback;
+
+    if ( !value || n > VMCS_BUF_SIZE )
+        goto fallback;
+
+    virtual_vmcs_enter(vvmcs);
+    for ( i = 0; i < n; i++ )
+        value[i] = __vmread(field[i]);
+    virtual_vmcs_exit(vvmcs);
+
+    for ( i = 0; i < n; i++ )
+        __vmwrite(field[i], value[i]);
+
+    return;
+
+fallback:
+    for ( i = 0; i < n; i++ )
+        vvmcs_to_shadow(vvmcs, field[i]);
+}
+
 static void shadow_to_vvmcs(void *vvmcs, unsigned int field)
 {
     u64 value;
@@ -840,6 +879,35 @@ static void shadow_to_vvmcs(void *vvmcs, unsigned int field)
     __set_vvmcs(vvmcs, field, value);
 }
 
+static void shadow_to_vvmcs_bulk(struct vcpu *v, unsigned int n,
+                                 const u16 *field)
+{
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    void *vvmcs = nvcpu->nv_vvmcx;
+    u64 *value = nvcpu->vvmcx_buf;
+    unsigned int i;
+
+    if ( !cpu_has_vmx_vmcs_shadowing )
+        goto fallback;
+
+    if ( !value || n > VMCS_BUF_SIZE )
+        goto fallback;
+
+    for ( i = 0; i < n; i++ )
+        value[i] = __vmread(field[i]);
+
+    virtual_vmcs_enter(vvmcs);
+    for ( i = 0; i < n; i++ )
+        __vmwrite(field[i], value[i]);
+    virtual_vmcs_exit(vvmcs);
+
+    return;
+
+fallback:
+    for ( i = 0; i < n; i++ )
+        shadow_to_vvmcs(vvmcs, field[i]);
+}
+
 static void load_shadow_control(struct vcpu *v)
 {
     /*
@@ -863,13 +931,18 @@ static void load_shadow_guest_state(struct vcpu *v)
 {
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     void *vvmcs = nvcpu->nv_vvmcx;
-    int i;
     u32 control;
     u64 cr_gh_mask, cr_read_shadow;
 
+    static const u16 vmentry_fields[] = {
+        VM_ENTRY_INTR_INFO,
+        VM_ENTRY_EXCEPTION_ERROR_CODE,
+        VM_ENTRY_INSTRUCTION_LEN,
+    };
+
     /* vvmcs.gstate to shadow vmcs.gstate */
-    for ( i = 0; i < ARRAY_SIZE(vmcs_gstate_field); i++ )
-        vvmcs_to_shadow(vvmcs, vmcs_gstate_field[i]);
+    vvmcs_to_shadow_bulk(v, ARRAY_SIZE(vmcs_gstate_field),
+                         vmcs_gstate_field);
 
     hvm_set_cr0(__get_vvmcs(vvmcs, GUEST_CR0));
     hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4));
@@ -883,9 +956,7 @@ static void load_shadow_guest_state(struct vcpu *v)
 
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
 
-    vvmcs_to_shadow(vvmcs, VM_ENTRY_INTR_INFO);
-    vvmcs_to_shadow(vvmcs, VM_ENTRY_EXCEPTION_ERROR_CODE);
-    vvmcs_to_shadow(vvmcs, VM_ENTRY_INSTRUCTION_LEN);
+    vvmcs_to_shadow_bulk(v, ARRAY_SIZE(vmentry_fields), vmentry_fields);
 
     /*
      * While emulate CR0 and CR4 for nested virtualization, set the CR0/CR4
@@ -905,10 +976,13 @@ static void load_shadow_guest_state(struct vcpu *v)
     if ( nvmx_ept_enabled(v) && hvm_pae_enabled(v) &&
          (v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
     {
-        vvmcs_to_shadow(vvmcs, GUEST_PDPTR0);
-        vvmcs_to_shadow(vvmcs, GUEST_PDPTR1);
-        vvmcs_to_shadow(vvmcs, GUEST_PDPTR2);
-        vvmcs_to_shadow(vvmcs, GUEST_PDPTR3);
+        static const u16 gpdptr_fields[] = {
+            GUEST_PDPTR0,
+            GUEST_PDPTR1,
+            GUEST_PDPTR2,
+            GUEST_PDPTR3,
+        };
+        vvmcs_to_shadow_bulk(v, ARRAY_SIZE(gpdptr_fields), gpdptr_fields);
     }
 
     /* TODO: CR3 target control */
@@ -999,13 +1073,12 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
 
 static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
 {
-    int i;
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     void *vvmcs = nvcpu->nv_vvmcx;
 
     /* copy shadow vmcs.gstate back to vvmcs.gstate */
-    for ( i = 0; i < ARRAY_SIZE(vmcs_gstate_field); i++ )
-        shadow_to_vvmcs(vvmcs, vmcs_gstate_field[i]);
+    shadow_to_vvmcs_bulk(v, ARRAY_SIZE(vmcs_gstate_field),
+                         vmcs_gstate_field);
     /* RIP, RSP are in user regs */
     __set_vvmcs(vvmcs, GUEST_RIP, regs->eip);
     __set_vvmcs(vvmcs, GUEST_RSP, regs->esp);
@@ -1017,13 +1090,11 @@ static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
 
 static void sync_vvmcs_ro(struct vcpu *v)
 {
-    int i;
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     void *vvmcs = nvcpu->nv_vvmcx;
 
-    for ( i = 0; i < ARRAY_SIZE(vmcs_ro_field); i++ )
-        shadow_to_vvmcs(nvcpu->nv_vvmcx, vmcs_ro_field[i]);
+    shadow_to_vvmcs_bulk(v, ARRAY_SIZE(vmcs_ro_field), vmcs_ro_field);
 
     /* Adjust exit_reason/exit_qualifciation for violation case */
     if ( __get_vvmcs(vvmcs, VM_EXIT_REASON) == EXIT_REASON_EPT_VIOLATION )
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index e8b8cd7..c9ffafc 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -100,6 +100,8 @@ struct nestedvcpu {
      */
     bool_t nv_ioport80;
     bool_t nv_ioportED;
+
+    u64 *vvmcx_buf; /* A temp buffer for data exchange */
 };
 
 #define vcpu_nestedhvm(v) ((v)->arch.hvm_vcpu.nvcpu)
-- 
1.7.1
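
To see the intended saving in isolation, here is a minimal, self-contained cost-model sketch. It is plain C, not Xen code: the counter names, the stub enter/exit helpers, and the 50-field batch size are illustrative assumptions, with the per-access cost taken from the "two VMPTRLDs and one VMCLEAR" figure in the commit message.

/* Illustrative cost model only -- not Xen code.  The helpers below are
 * stand-ins for virtual_vmcs_enter()/virtual_vmcs_exit(): entering the
 * virtual VMCS is modelled as one VMPTRLD, leaving it as one VMCLEAR
 * plus one VMPTRLD to reload the ordinary VMCS. */
#include <stdio.h>

#define NFIELDS 50  /* assumed batch size, e.g. ARRAY_SIZE(vmcs_gstate_field) */

static unsigned long vmptrld_count, vmclear_count;

static void model_vmcs_enter(void)
{
    vmptrld_count++;
}

static void model_vmcs_exit(void)
{
    vmclear_count++;
    vmptrld_count++;
}

/* Pre-patch pattern: switch in and out around every single field. */
static void copy_fields_one_by_one(unsigned int n)
{
    unsigned int i;

    for ( i = 0; i < n; i++ )
    {
        model_vmcs_enter();
        /* one __vmread() of the virtual VMCS would happen here */
        model_vmcs_exit();
        /* one __vmwrite() to the shadow VMCS would happen here */
    }
}

/* Post-patch pattern: one switch for the whole batch, with the reads
 * staged in a scratch array (nvcpu->vvmcx_buf in the patch). */
static void copy_fields_bulk(unsigned int n)
{
    model_vmcs_enter();
    /* n x __vmread() into the scratch buffer would happen here */
    model_vmcs_exit();
    /* n x __vmwrite() from the scratch buffer would happen here */
    (void)n;
}

int main(void)
{
    copy_fields_one_by_one(NFIELDS);
    printf("per-field: %lu VMPTRLD, %lu VMCLEAR\n",
           vmptrld_count, vmclear_count);

    vmptrld_count = vmclear_count = 0;
    copy_fields_bulk(NFIELDS);
    printf("bulk:      %lu VMPTRLD, %lu VMCLEAR\n",
           vmptrld_count, vmclear_count);

    return 0;
}

Built with any C99 compiler, this prints 100 VMPTRLDs and 50 VMCLEARs for the per-field scheme versus 2 and 1 for the bulk scheme; staging the reads in vvmcx_buf is what allows a single enter/exit pair per batch.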