Re: [Xen-devel] [PATCH v5 06/15] VMX/altp2m: add code to support EPTP switching and #VE.
>From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
>Sent: Tuesday, July 14, 2015 6:57 AM
>
>>>> On 14.07.15 at 02:14, <edmund.h.white@xxxxxxxxx> wrote:
>> Implement and hook up the code to enable VMX support of VMFUNC and
>> #VE.
>>
>> VMFUNC leaf 0 (EPTP switching) emulation is added in a later patch.
>>
>> Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
>>
>> Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
>> Acked-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
>> ---
>>  xen/arch/x86/hvm/vmx/vmx.c | 138 +++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 138 insertions(+)
>>
>> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
>> index 07527dd..38dba6b 100644
>> --- a/xen/arch/x86/hvm/vmx/vmx.c
>> +++ b/xen/arch/x86/hvm/vmx/vmx.c
>> @@ -56,6 +56,7 @@
>>  #include <asm/debugger.h>
>>  #include <asm/apic.h>
>>  #include <asm/hvm/nestedhvm.h>
>> +#include <asm/hvm/altp2m.h>
>>  #include <asm/event.h>
>>  #include <asm/monitor.h>
>>  #include <public/arch-x86/cpuid.h>
>> @@ -1763,6 +1764,104 @@ static void vmx_enable_msr_exit_interception(struct domain *d)
>>                                               MSR_TYPE_W);
>>  }
>>
>> +static void vmx_vcpu_update_eptp(struct vcpu *v)
>> +{
>> +    struct domain *d = v->domain;
>> +    struct p2m_domain *p2m = NULL;
>> +    struct ept_data *ept;
>> +
>> +    if ( altp2m_active(d) )
>> +        p2m = p2m_get_altp2m(v);
>> +    if ( !p2m )
>> +        p2m = p2m_get_hostp2m(d);
>> +
>> +    ept = &p2m->ept;
>> +    ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
>> +
>> +    vmx_vmcs_enter(v);
>> +
>> +    __vmwrite(EPT_POINTER, ept_get_eptp(ept));
>> +
>> +    if ( v->arch.hvm_vmx.secondary_exec_control &
>> +         SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS )
>> +        __vmwrite(EPTP_INDEX, vcpu_altp2m(v).p2midx);
>> +
>> +    vmx_vmcs_exit(v);
>> +}
>> +
>> +static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
>> +{
>> +    struct domain *d = v->domain;
>> +    u32 mask = SECONDARY_EXEC_ENABLE_VM_FUNCTIONS;
>> +
>> +    if ( !cpu_has_vmx_vmfunc )
>> +        return;
>> +
>> +    if ( cpu_has_vmx_virt_exceptions )
>> +        mask |= SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS;
>> +
>> +    vmx_vmcs_enter(v);
>> +
>> +    if ( !d->is_dying && altp2m_active(d) )
>> +    {
>> +        v->arch.hvm_vmx.secondary_exec_control |= mask;
>> +        __vmwrite(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
>> +        __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_eptp));
>> +
>> +        if ( cpu_has_vmx_virt_exceptions )
>> +        {
>> +            p2m_type_t t;
>> +            mfn_t mfn;
>> +
>> +            mfn = get_gfn_query_unlocked(d,
>> +                      gfn_x(vcpu_altp2m(v).veinfo_gfn), &t);
>> +
>> +            if ( mfn_x(mfn) != INVALID_MFN )
>> +                __vmwrite(VIRT_EXCEPTION_INFO, mfn_x(mfn) << PAGE_SHIFT);
>> +            else
>> +                mask &= ~SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS;
>
>Considering the rest of the function, this is dead code.
>

Right, should be v->arch.hvm_vmx.secondary_exec_control &=...

>> +        }
>> +    }
>> +    else
>> +        v->arch.hvm_vmx.secondary_exec_control &= ~mask;
>> +
>> +    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
>> +              v->arch.hvm_vmx.secondary_exec_control);
>> +
>> +    vmx_vmcs_exit(v);
>> +}
>> +
>> +static bool_t vmx_vcpu_emulate_ve(struct vcpu *v)
>> +{
>> +    bool_t rc = 0;
>> +    ve_info_t *veinfo = gfn_x(vcpu_altp2m(v).veinfo_gfn) != INVALID_GFN ?
>> +        hvm_map_guest_frame_rw(gfn_x(vcpu_altp2m(v).veinfo_gfn), 0) : NULL;
>> +
>> +    if ( !veinfo )
>> +        return 0;
>> +
>> +    if ( veinfo->semaphore != 0 )
>> +        goto out;
>> +
>> +    rc = 1;
>> +
>> +    veinfo->exit_reason = EXIT_REASON_EPT_VIOLATION;
>> +    veinfo->semaphore = ~0l;
>
>Isn't semaphore a 32-bit quantity?
>

Yes.
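For reference, the ve_info_t written through here maps onto the #VE information area format defined in the Intel SDM, where the dword at byte offset 4 (used as the semaphore above) is 32 bits wide, so storing ~0l only comes out right through truncation. A rough sketch of that layout, reusing the field names visible in the patch (the typedef spelling is illustrative, not taken from the series):

/* Sketch of the SDM-defined #VE information area layout. */
typedef struct {
    u32 exit_reason;        /* byte  0: basic exit reason (EPT violation here)    */
    u32 semaphore;          /* byte  4: non-zero suppresses further #VE delivery  */
    u64 exit_qualification; /* byte  8                                            */
    u64 gla;                /* byte 16: guest-linear address                      */
    u64 gpa;                /* byte 24: guest-physical address                    */
    u16 eptp_index;         /* byte 32: EPTP index in use when the #VE occurred   */
} ve_info_t;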
>> +    veinfo->eptp_index = vcpu_altp2m(v).p2midx;
>> +
>> +    vmx_vmcs_enter(v);
>> +    __vmread(EXIT_QUALIFICATION, &veinfo->exit_qualification);
>> +    __vmread(GUEST_LINEAR_ADDRESS, &veinfo->gla);
>> +    __vmread(GUEST_PHYSICAL_ADDRESS, &veinfo->gpa);
>> +    vmx_vmcs_exit(v);
>> +
>> +    hvm_inject_hw_exception(TRAP_virtualisation,
>> +                            HVM_DELIVER_NO_ERROR_CODE);
>> +
>> +out:
>
>Un-indented label again.
>

Got it.

>> @@ -2744,6 +2846,42 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
>>      /* Now enable interrupts so it's safe to take locks. */
>>      local_irq_enable();
>>
>> +    /*
>> +     * If the guest has the ability to switch EPTP without an exit,
>> +     * figure out whether it has done so and update the altp2m data.
>> +     */
>> +    if ( altp2m_active(v->domain) &&
>> +        (v->arch.hvm_vmx.secondary_exec_control &
>> +        SECONDARY_EXEC_ENABLE_VM_FUNCTIONS) )
>
>Indentation.
>

Yep.

>> +    {
>> +        unsigned long idx;
>> +
>> +        if ( v->arch.hvm_vmx.secondary_exec_control &
>> +             SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS )
>> +            __vmread(EPTP_INDEX, &idx);
>> +        else
>> +        {
>> +            unsigned long eptp;
>> +
>> +            __vmread(EPT_POINTER, &eptp);
>> +
>> +            if ( (idx = p2m_find_altp2m_by_eptp(v->domain, eptp)) ==
>> +                 INVALID_ALTP2M )
>> +            {
>> +                gdprintk(XENLOG_ERR, "EPTP not found in alternate p2m list\n");
>> +                domain_crash(v->domain);
>> +            }
>> +        }
>> +
>> +        if ( (uint16_t)idx != vcpu_altp2m(v).p2midx )
>
>Is this cast really necessary?
>

Yes - the index is 16 bits; this reflects how the field is specified in the VMCS as well.

>> +        {
>> +            BUG_ON(idx >= MAX_ALTP2M);
>> +            atomic_dec(&p2m_get_altp2m(v)->active_vcpus);
>> +            vcpu_altp2m(v).p2midx = (uint16_t)idx;
>
>This one surely isn't (or else the field type is wrong).
>

Again required: idx can't be uint16_t because __vmread() requires an unsigned long *, but the index itself is 16 bits.

Thanks,
Ravi

>Jan
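As a footnote to the exchange about the cast: __vmread() writes through an unsigned long pointer even for 16-bit VMCS fields such as EPTP_INDEX, so the value is read wide and then narrowed explicitly when compared with, or stored into, the 16-bit p2midx field. A minimal sketch of that pattern (illustration only, not the patch itself):

/* Sketch: reading a 16-bit VMCS field through __vmread()'s
 * unsigned long interface, then narrowing explicitly. */
unsigned long idx;                              /* __vmread() takes unsigned long * */

__vmread(EPTP_INDEX, &idx);                     /* 16-bit field lands in the low bits */

if ( (uint16_t)idx != vcpu_altp2m(v).p2midx )   /* p2midx is a 16-bit field */
    vcpu_altp2m(v).p2midx = (uint16_t)idx;      /* explicit narrowing on store */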