[Xen-devel] [PATCH 11/11] nVMX: Expose VPID capability to nested VMM.
From: Zhang Xiantao <xiantao.zhang@xxxxxxxxx>

Virtualize VPID for the nested VMM: use the host's VPID to emulate the
guest's VPID. For each virtual vmentry, if the guest's VPID has changed,
allocate a new host VPID for the L2 guest.

Signed-off-by: Zhang Xiantao <xiantao.zhang@xxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmx.c         |   10 +++++-
 xen/arch/x86/hvm/vmx/vvmx.c        |   60 +++++++++++++++++++++++++++++++++++-
 xen/arch/x86/mm/hap/nested_ept.c   |    7 ++--
 xen/include/asm-x86/hvm/vmx/vvmx.h |    2 +
 4 files changed, 73 insertions(+), 6 deletions(-)
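To make the remapping policy concrete, here is a small self-contained
user-space sketch (illustration only, not part of the patch; all names
are invented stand-ins, with sketch_flush_asid()/sketch_handle_vmenter()
playing the roles of Xen's hvm_asid_flush_vcpu_asid() and
hvm_asid_handle_vmenter()). It models how a guest-chosen VPID is backed
by a host-allocated ASID, and how a VPID change in the vVMCS forces a
fresh allocation at the next virtual vmentry:

/* vpid_sketch.c: models only the reallocate-on-VPID-change policy. */
#include <stdint.h>
#include <stdio.h>

struct asid {
    uint32_t asid;   /* host ASID currently backing the L2 guest */
    int      valid;  /* cleared to force reallocation on next entry */
};

static uint32_t next_host_asid = 1;

/* ~hvm_asid_flush_vcpu_asid: invalidate the cached VPID->ASID mapping. */
static void sketch_flush_asid(struct asid *a)
{
    a->valid = 0;
}

/* ~hvm_asid_handle_vmenter: hand out a fresh host ASID if flushed. */
static int sketch_handle_vmenter(struct asid *a)
{
    if ( a->valid )
        return 0;                  /* mapping still good, no TLB flush */
    a->asid = next_host_asid++;    /* new tag: old translations are dead */
    a->valid = 1;
    return 1;                      /* caller must flush on this entry */
}

int main(void)
{
    struct asid n2asid = { 0, 0 };
    uint32_t guest_vpid = 0;                /* cached copy, cf. nvmx->guest_vpid */
    uint32_t l1_writes[] = { 1, 1, 2, 1 };  /* VPIDs L1 programs into the vVMCS */

    for ( unsigned int i = 0; i < sizeof(l1_writes)/sizeof(l1_writes[0]); i++ )
    {
        uint32_t new_vpid = l1_writes[i];

        /* The check virtual_vmentry() performs in this patch: */
        if ( guest_vpid != new_vpid )
        {
            sketch_flush_asid(&n2asid);
            guest_vpid = new_vpid;
        }

        int flushed = sketch_handle_vmenter(&n2asid);
        printf("vmentry %u: guest VPID %u -> host ASID %u%s\n",
               i, (unsigned int)new_vpid, (unsigned int)n2asid.asid,
               flushed ? " (TLB flushed)" : "");
    }
    return 0;
}

Note the last entry: switching back to a previously seen VPID still
costs a fresh ASID, because only a single cached guest VPID is kept per
vcpu (nvmx->guest_vpid); that trades some TLB reuse for simplicity.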
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 36f6d82..fb40392 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2626,10 +2626,13 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         if ( nvmx_handle_invept(regs) == X86EMUL_OKAY )
             update_guest_eip();
         break;
+    case EXIT_REASON_INVVPID:
+        if ( nvmx_handle_invvpid(regs) == X86EMUL_OKAY )
+            update_guest_eip();
+        break;
     case EXIT_REASON_MWAIT_INSTRUCTION:
     case EXIT_REASON_MONITOR_INSTRUCTION:
     case EXIT_REASON_GETSEC:
-    case EXIT_REASON_INVVPID:
         /*
          * We should never exit on GETSEC because CR4.SMXE is always 0 when
          * running in guest context, and the CPU checks that before getting
@@ -2747,8 +2750,11 @@ void vmx_vmenter_helper(void)
     if ( !cpu_has_vmx_vpid )
         goto out;
 
+    if ( nestedhvm_vcpu_in_guestmode(curr) )
+        p_asid = &vcpu_nestedhvm(curr).nv_n2asid;
+    else
+        p_asid = &curr->arch.hvm_vcpu.n1asid;
-    p_asid = &curr->arch.hvm_vcpu.n1asid;
     old_asid = p_asid->asid;
     need_flush = hvm_asid_handle_vmenter(p_asid);
     new_asid = p_asid->asid;
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index ec875d2..28a8e78 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -42,6 +42,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
         goto out;
     }
     nvmx->ept.enabled = 0;
+    nvmx->guest_vpid = 0;
     nvmx->vmxon_region_pa = 0;
     nvcpu->nv_vvmcx = NULL;
     nvcpu->nv_vvmcxaddr = VMCX_EADDR;
@@ -849,6 +850,16 @@ static uint64_t get_shadow_eptp(struct vcpu *v)
     return ept_data->ept_ctl.eptp;
 }
 
+static bool_t nvmx_vpid_enabled(struct nestedvcpu *nvcpu)
+{
+    uint32_t second_cntl;
+
+    second_cntl = __get_vvmcs(nvcpu->nv_vvmcx, SECONDARY_VM_EXEC_CONTROL);
+    if ( second_cntl & SECONDARY_EXEC_ENABLE_VPID )
+        return 1;
+    return 0;
+}
+
 static void virtual_vmentry(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
@@ -897,6 +908,18 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
     if ( nestedhvm_paging_mode_hap(v) )
         __vmwrite(EPT_POINTER, get_shadow_eptp(v));
 
+    /* Nested VPID support: a changed guest VPID needs a fresh host ASID. */
+    if ( cpu_has_vmx_vpid && nvmx_vpid_enabled(nvcpu) )
+    {
+        struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+        uint32_t new_vpid = __get_vvmcs(vvmcs, VIRTUAL_PROCESSOR_ID);
+
+        if ( nvmx->guest_vpid != new_vpid )
+        {
+            hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
+            nvmx->guest_vpid = new_vpid;
+        }
+    }
 }
 
 static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
@@ -1188,7 +1211,7 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
     if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
     {
         vmreturn (regs, VMFAIL_INVALID);
-	return X86EMUL_OKAY;
+        return X86EMUL_OKAY;
     }
 
     launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
@@ -1363,6 +1386,9 @@ int nvmx_handle_invept(struct cpu_user_regs *regs)
     unsigned long eptp;
     u64 inv_type;
 
+    if ( !cpu_has_vmx_ept )
+        return X86EMUL_EXCEPTION;
+
     if ( decode_vmx_inst(regs, &decode, &eptp, 0) != X86EMUL_OKAY )
         return X86EMUL_EXCEPTION;
 
@@ -1401,6 +1427,37 @@ int nvmx_handle_invept(struct cpu_user_regs *regs)
     (((__emul_value(enable1, default1) & host_value) & (~0ul << 32)) | \
      ((uint32_t)(__emul_value(enable1, default1) | host_value)))
 
+int nvmx_handle_invvpid(struct cpu_user_regs *regs)
+{
+    struct vmx_inst_decoded decode;
+    unsigned long vpid;
+    u64 inv_type;
+
+    if ( !cpu_has_vmx_vpid )
+        return X86EMUL_EXCEPTION;
+
+    if ( decode_vmx_inst(regs, &decode, &vpid, 0) != X86EMUL_OKAY )
+        return X86EMUL_EXCEPTION;
+
+    inv_type = reg_read(regs, decode.reg2);
+    gdprintk(XENLOG_DEBUG, "inv_type:%ld, vpid:%lx\n", inv_type, vpid);
+
+    switch ( inv_type )
+    {
+    /* Just invalidate all tlb entries for all types! */
+    case INVVPID_INDIVIDUAL_ADDR:
+    case INVVPID_SINGLE_CONTEXT:
+    case INVVPID_ALL_CONTEXT:
+        hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(current).nv_n2asid);
+        break;
+    default:
+        return X86EMUL_EXCEPTION;
+    }
+
+    vmreturn(regs, VMSUCCEED);
+    return X86EMUL_OKAY;
+}
+
 /*
  * Capability reporting
  */
@@ -1458,6 +1515,7 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
         /* 1-settings */
         data = SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING |
                SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+               SECONDARY_EXEC_ENABLE_VPID |
                SECONDARY_EXEC_ENABLE_EPT;
         data = gen_vmx_msr(data, 0, host_data);
         break;
diff --git a/xen/arch/x86/mm/hap/nested_ept.c b/xen/arch/x86/mm/hap/nested_ept.c
index 8dfb70a..d0be5ce 100644
--- a/xen/arch/x86/mm/hap/nested_ept.c
+++ b/xen/arch/x86/mm/hap/nested_ept.c
@@ -48,7 +48,7 @@
 #define EPT_EMT_WB 6
 #define EPT_EMT_UC 0
 
-#define NEPT_VPID_CAP_BITS 0x0000000006134140ul
+#define NEPT_VPID_CAP_BITS 0xf0106134140ul
 
 #define NEPT_1G_ENTRY_FLAG (1 << 11)
 #define NEPT_2M_ENTRY_FLAG (1 << 10)
@@ -126,8 +126,9 @@ static bool_t nept_present_check(uint64_t entry)
 
 uint64_t nept_get_ept_vpid_cap(void)
 {
-    /*TODO: exposed ept and vpid features*/
-    return NEPT_VPID_CAP_BITS;
+    if ( cpu_has_vmx_ept && cpu_has_vmx_vpid )
+        return NEPT_VPID_CAP_BITS;
+    return 0;
 }
 
 static uint32_t
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index fcdce62..1e7a6d7 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -37,6 +37,7 @@ struct nestedvmx {
         uint32_t exit_reason;
         uint32_t exit_qual;
     } ept;
+    uint32_t guest_vpid;
 };
 
 #define vcpu_2_nvmx(v) (vcpu_nestedhvm(v).u.nvmx)
@@ -191,6 +192,7 @@ int nvmx_handle_vmwrite(struct cpu_user_regs *regs);
 int nvmx_handle_vmresume(struct cpu_user_regs *regs);
 int nvmx_handle_vmlaunch(struct cpu_user_regs *regs);
 int nvmx_handle_invept(struct cpu_user_regs *regs);
+int nvmx_handle_invvpid(struct cpu_user_regs *regs);
 int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content);
 int nvmx_msr_write_intercept(unsigned int msr,
-- 
1.7.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel