|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v2 05/12] VMX/altp2m: add code to support EPTP switching and #VE.
On 22/06/15 19:56, Ed White wrote:
> Implement and hook up the code to enable VMX support of VMFUNC and #VE.
>
> VMFUNC leaf 0 (EPTP switching) emulation is added in a later patch.
>
> Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
> ---
> xen/arch/x86/hvm/vmx/vmx.c | 132
> +++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 132 insertions(+)
>
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 2d3ad63..e8d9c82 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -56,6 +56,7 @@
> #include <asm/debugger.h>
> #include <asm/apic.h>
> #include <asm/hvm/nestedhvm.h>
> +#include <asm/hvm/altp2mhvm.h>
> #include <asm/event.h>
> #include <asm/monitor.h>
> #include <public/arch-x86/cpuid.h>
> @@ -1763,6 +1764,100 @@ static void vmx_enable_msr_exit_interception(struct
> domain *d)
> MSR_TYPE_W);
> }
>
> +static void vmx_vcpu_update_eptp(struct vcpu *v)
> +{
> + struct domain *d = v->domain;
> + struct p2m_domain *p2m = NULL;
> + struct ept_data *ept;
> +
> + if ( altp2mhvm_active(d) )
> + p2m = p2m_get_altp2m(v);
> + if ( !p2m )
> + p2m = p2m_get_hostp2m(d);
> +
> + ept = &p2m->ept;
> + ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
> +
> + vmx_vmcs_enter(v);
> +
> + __vmwrite(EPT_POINTER, ept_get_eptp(ept));
> +
> + if ( v->arch.hvm_vmx.secondary_exec_control &
> + SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS )
> + __vmwrite(EPTP_INDEX, vcpu_altp2mhvm(v).p2midx);
> +
> + vmx_vmcs_exit(v);
> +}
> +
> +static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
> +{
> + struct domain *d = v->domain;
> + u32 mask = SECONDARY_EXEC_ENABLE_VM_FUNCTIONS;
> +
> + if ( !cpu_has_vmx_vmfunc )
> + return;
> +
> + if ( cpu_has_vmx_virt_exceptions )
> + mask |= SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS;
> +
> + vmx_vmcs_enter(v);
> +
> + if ( !d->is_dying && altp2mhvm_active(d) )
> + {
> + v->arch.hvm_vmx.secondary_exec_control |= mask;
> + __vmwrite(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
> + __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_eptp));
> +
> + if ( cpu_has_vmx_virt_exceptions )
> + {
> + p2m_type_t t;
> + mfn_t mfn;
> +
> + mfn = get_gfn_query_unlocked(d, vcpu_altp2mhvm(v).veinfo_gfn,
> &t);
get_gfn_query_unlocked() returns _mfn(INVALID_MFN) in the failure case,
which you must not blindly write back.
> + __vmwrite(VIRT_EXCEPTION_INFO, mfn_x(mfn) << PAGE_SHIFT);
pfn_to_paddr() please, rather than opencoding it. (This is a helper
which needs cleaning up, name-wise).
> + }
> + }
> + else
> + v->arch.hvm_vmx.secondary_exec_control &= ~mask;
> +
> + __vmwrite(SECONDARY_VM_EXEC_CONTROL,
> + v->arch.hvm_vmx.secondary_exec_control);
> +
> + vmx_vmcs_exit(v);
> +}
> +
> +static bool_t vmx_vcpu_emulate_ve(struct vcpu *v)
> +{
> + bool_t rc = 0;
> + ve_info_t *veinfo = vcpu_altp2mhvm(v).veinfo_gfn ?
> + hvm_map_guest_frame_rw(vcpu_altp2mhvm(v).veinfo_gfn, 0) : NULL;
gfn 0 is a valid (albeit unlikely) location to request the veinfo page.
Use INVALID_GFN as the sentinel.
> +
> + if ( !veinfo )
> + return 0;
> +
> + if ( veinfo->semaphore != 0 )
> + goto out;
The semantics of this semaphore are not clearly spelled out in the
manual. The only information I can locate concerning this field is a
note in 25.5.6.1 which says:
"Delivery of virtualization exceptions writes the value FFFFFFFFH to
offset 4 in the virtualization-exception information area (see Section
25.5.6.2). Thus, once a virtualization exception occurs, another can
occur only if software clears this field."
I presume this should be taken to mean "software writes 0 to this
field", but some clarification would be nice.
> +
> + rc = 1;
> +
> + veinfo->exit_reason = EXIT_REASON_EPT_VIOLATION;
> + veinfo->semaphore = ~0l;
semaphore is declared as an unsigned field, so should use ~0u.
> + veinfo->eptp_index = vcpu_altp2mhvm(v).p2midx;
> +
> + vmx_vmcs_enter(v);
> + __vmread(EXIT_QUALIFICATION, &veinfo->exit_qualification);
> + __vmread(GUEST_LINEAR_ADDRESS, &veinfo->gla);
> + __vmread(GUEST_PHYSICAL_ADDRESS, &veinfo->gpa);
> + vmx_vmcs_exit(v);
> +
> + hvm_inject_hw_exception(TRAP_virtualisation,
> + HVM_DELIVER_NO_ERROR_CODE);
> +
> +out:
> + hvm_unmap_guest_frame(veinfo, 0);
> + return rc;
> +}
> +
> static struct hvm_function_table __initdata vmx_function_table = {
> .name = "VMX",
> .cpu_up_prepare = vmx_cpu_up_prepare,
> @@ -1822,6 +1917,9 @@ static struct hvm_function_table __initdata
> vmx_function_table = {
> .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
> .hypervisor_cpuid_leaf = vmx_hypervisor_cpuid_leaf,
> .enable_msr_exit_interception = vmx_enable_msr_exit_interception,
> + .ahvm_vcpu_update_eptp = vmx_vcpu_update_eptp,
> + .ahvm_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
> + .ahvm_vcpu_emulate_ve = vmx_vcpu_emulate_ve,
> };
>
> const struct hvm_function_table * __init start_vmx(void)
> @@ -2754,6 +2852,40 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
>
> /* Now enable interrupts so it's safe to take locks. */
> local_irq_enable();
> +
> + /*
> + * If the guest has the ability to switch EPTP without an exit,
> + * figure out whether it has done so and update the altp2m data.
> + */
> + if ( altp2mhvm_active(v->domain) &&
> + (v->arch.hvm_vmx.secondary_exec_control &
> + SECONDARY_EXEC_ENABLE_VM_FUNCTIONS) )
> + {
> + unsigned long idx;
> +
> + if ( v->arch.hvm_vmx.secondary_exec_control &
> + SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS )
> + __vmread(EPTP_INDEX, &idx);
> + else
> + {
> + unsigned long eptp;
> +
> + __vmread(EPT_POINTER, &eptp);
> +
> + if ( !p2m_find_altp2m_by_eptp(v->domain, eptp, &idx) )
> + {
> + gdprintk(XENLOG_ERR, "EPTP not found in alternate p2m
> list\n");
> + domain_crash(v->domain);
> + }
> + }
> +
Is it worth checking that idx is plausible at this point, before blindly
writing it back into the vcpu structure?
~Andrew
> + if ( (uint16_t)idx != vcpu_altp2mhvm(v).p2midx )
> + {
> + atomic_dec(&p2m_get_altp2m(v)->active_vcpus);
> + vcpu_altp2mhvm(v).p2midx = (uint16_t)idx;
> + atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
> + }
> + }
>
> /* XXX: This looks ugly, but we need a mechanism to ensure
> * any pending vmresume has really happened
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |