Re: [Xen-devel] [PATCH 05/10] VMX: add help functions to support PML
On 27/03/15 02:35, Kai Huang wrote:
> This patch adds help functions to enable/disable PML, and flush PML buffer for
> single vcpu and particular domain for further use.
>
> Signed-off-by: Kai Huang <kai.huang@xxxxxxxxxxxxxxx>
> ---
>  xen/arch/x86/hvm/vmx/vmcs.c        | 190 +++++++++++++++++++++++++++++++++++++
>  xen/include/asm-x86/hvm/vmx/vmcs.h |   9 ++
>  2 files changed, 199 insertions(+)
>
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index 2798b0b..17cbef4 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -1326,6 +1326,196 @@ void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector)
> &v->arch.hvm_vmx.eoi_exitmap_changed);
> }
>
> +int vmx_vcpu_pml_enabled(struct vcpu *v)
bool_t vmx_vcpu_pml_enabled(const struct vcpu *v)
> +{
> + return (v->arch.hvm_vmx.secondary_exec_control &
> + SECONDARY_EXEC_ENABLE_PML) ? 1 : 0;
This would be slightly shorter as
!!(v->arch.hvm_vmx.secondary_exec_control & SECONDARY_EXEC_ENABLE_PML)
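Combined with the bool_t/const signature suggested above, the whole helper
would then read (a sketch of both suggestions applied together):

    bool_t vmx_vcpu_pml_enabled(const struct vcpu *v)
    {
        return !!(v->arch.hvm_vmx.secondary_exec_control &
                  SECONDARY_EXEC_ENABLE_PML);
    }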
> +}
> +
> +int vmx_vcpu_enable_pml(struct vcpu *v)
> +{
> + struct domain *d = v->domain;
> +
> + ASSERT(!vmx_vcpu_pml_enabled(v));
> +
> + v->arch.hvm_vmx.pml_pg = d->arch.paging.alloc_page(d);
> + if ( !v->arch.hvm_vmx.pml_pg )
> + return -ENOMEM;
> +
> + vmx_vmcs_enter(v);
> +
> + __vmwrite(PML_ADDRESS, page_to_mfn(v->arch.hvm_vmx.pml_pg) << PAGE_SHIFT);
> + __vmwrite(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
> +
> + v->arch.hvm_vmx.secondary_exec_control |= SECONDARY_EXEC_ENABLE_PML;
> +
> + __vmwrite(SECONDARY_VM_EXEC_CONTROL,
> + v->arch.hvm_vmx.secondary_exec_control);
Alignment.
> +
> + vmx_vmcs_exit(v);
> +
> + return 0;
> +}
> +
> +void vmx_vcpu_disable_pml(struct vcpu *v)
> +{
> + ASSERT(vmx_vcpu_pml_enabled(v));
> +
> + vmx_vmcs_enter(v);
> +
> + v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
> + __vmwrite(SECONDARY_VM_EXEC_CONTROL,
> + v->arch.hvm_vmx.secondary_exec_control);
> +
> + vmx_vmcs_exit(v);
> +
> + v->domain->arch.paging.free_page(v->domain, v->arch.hvm_vmx.pml_pg);
> + v->arch.hvm_vmx.pml_pg = NULL;
> +}
> +
> +void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
> +{
> + uint64_t *pml_buf;
> + unsigned long pml_idx;
> +
> + ASSERT(vmx_vcpu_pml_enabled(v));
> +
> + vmx_vmcs_enter(v);
> +
> + __vmread(GUEST_PML_INDEX, &pml_idx);
> +
> + /* Do nothing if PML buffer is empty */
> + if ( pml_idx == (PML_ENTITY_NUM - 1) )
> + goto out;
> +
> + pml_buf = map_domain_page(page_to_mfn(v->arch.hvm_vmx.pml_pg));
__map_domain_page() is a wrapper which takes a struct page_info, making the
explicit page_to_mfn() here unnecessary.
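i.e. the mapping could simply be (sketch, using the pml_pg allocated in
vmx_vcpu_enable_pml()):

    pml_buf = __map_domain_page(v->arch.hvm_vmx.pml_pg);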
> +
> + /*
> + * PML index can be either 2^16-1 (buffer is full), or 0~511 (buffer is
> + * not full), and in the latter case the PML index always points to the
> + * next available entity.
> + */
> + if (pml_idx >= PML_ENTITY_NUM)
> + pml_idx = 0;
> + else
> + pml_idx++;
> +
> + for ( ; pml_idx < PML_ENTITY_NUM; pml_idx++ )
> + {
> + struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
This p2m_get_hostp2m() call should be hoisted out of the loop.
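i.e. something like this (a sketch; the "..." elides the unchanged loop body):

    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);

    for ( ; pml_idx < PML_ENTITY_NUM; pml_idx++ )
    {
        unsigned long gfn = pml_buf[pml_idx] >> PAGE_SHIFT;
        ...
    }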
> + unsigned long gfn;
> + mfn_t mfn;
> + p2m_type_t t;
> + p2m_access_t a;
> +
> + gfn = pml_buf[pml_idx] >> PAGE_SHIFT;
> + mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
> + if ( mfn_x(mfn) == INVALID_MFN )
> + {
> + /*
> + * Either the EPT table entry mapping the GFN has been destroyed, or
> + * there's something wrong with hardware behavior; in both cases we
> + * should report a warning.
> + */
> + dprintk(XENLOG_WARNING, "PML: vcpu %d: invalid GPA 0x%lx logged\n",
> + v->vcpu_id, pml_buf[pml_idx]);
It would be shorter to log gfn rather than gpa.
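e.g. (sketch, reusing the gfn already computed above):

    dprintk(XENLOG_WARNING, "PML: vcpu %d: invalid GFN %lx logged\n",
            v->vcpu_id, gfn);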
> + continue;
> + }
> +
> + /*
> + * Need to change the type from log-dirty to normal memory for the
> + * logged GFN. hap_track_dirty_vram depends on it to work. And we really
> + * only need to mark GFNs which have been successfully changed from
> + * log-dirty to normal memory as dirty.
> + */
> + if ( !p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty,
> + p2m_ram_rw) )
> + paging_mark_dirty(v->domain, mfn_x(mfn));
> + }
> +
> + unmap_domain_page(pml_buf);
> +
> + /* Reset PML index */
> + __vmwrite(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
> +
> +out:
> + vmx_vmcs_exit(v);
> +}
> +
> +int vmx_domain_pml_enabled(struct domain *d)
bool_t and const as per vcpu variant.
> +{
> + return (d->arch.hvm_domain.vmx.status & VMX_DOMAIN_PML_ENABLED) ? 1 : 0;
> +}
> +
> +/*
> + * This function enables PML for a particular domain. It should be called
> + * when the domain is paused.
In which case assert that the domain is paused, or call domain_pause()
yourself to take an extra pause refcount.
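e.g. (a sketch; pause_count is the pause refcount in struct domain):

    ASSERT(atomic_read(&d->pause_count));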
> + *
> + * PML needs to be enabled globally for all vcpus of the domain, as the
> + * PML buffer and PML index are per-vcpu, but the EPT table is shared by
> + * vcpus; therefore enabling PML on only a subset of vcpus won't work.
> + */
> +int vmx_domain_enable_pml(struct domain *d)
> +{
> + struct vcpu *v;
> +
> + ASSERT(!vmx_domain_pml_enabled(d));
> +
> + for_each_vcpu( d, v )
> + {
> + if ( vmx_vcpu_enable_pml(v) )
> + goto error;
Please catch the actual rc from vmx_vcpu_enable_pml() and propagate out
of this function, rather than clobbering -ENOMEM with -EINVAL.
Also, per Xen style, you can drop the braces.
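i.e. something along the lines of (sketch; the "..." elides the unchanged
success path):

    int rc = 0;

    for_each_vcpu ( d, v )
        if ( (rc = vmx_vcpu_enable_pml(v)) != 0 )
            goto error;
    ...

 error:
    for_each_vcpu ( d, v )
        if ( vmx_vcpu_pml_enabled(v) )
            vmx_vcpu_disable_pml(v);
    return rc;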
~Andrew
> + }
> +
> + d->arch.hvm_domain.vmx.status |= VMX_DOMAIN_PML_ENABLED;
> +
> + return 0;
> +
> +error:
> + for_each_vcpu( d, v )
> + {
> + if ( vmx_vcpu_pml_enabled(v) )
> + vmx_vcpu_disable_pml(v);
> + }
> + return -EINVAL;
> +}
> +
> +/*
> + * Disable PML for a particular domain. Called when the domain is paused.
> + *
> + * The same as enabling PML for domain, disabling PML should be done for all
> + * vcpus at once.
> + */
> +void vmx_domain_disable_pml(struct domain *d)
> +{
> + struct vcpu *v;
> +
> + ASSERT(vmx_domain_pml_enabled(d));
> +
> + for_each_vcpu( d, v )
> + {
> + vmx_vcpu_disable_pml(v);
> + }
> +
> + d->arch.hvm_domain.vmx.status &= ~VMX_DOMAIN_PML_ENABLED;
> +}
> +
> +/*
> + * Flush the PML buffers of all vcpus, and update the logged dirty pages
> + * to the log-dirty radix tree. Called when the domain is paused.
> + */
> +void vmx_domain_flush_pml_buffers(struct domain *d)
> +{
> + struct vcpu *v;
> +
> + ASSERT(vmx_domain_pml_enabled(d));
> +
> + for_each_vcpu( d, v )
> + {
> + vmx_vcpu_flush_pml_buffer(v);
> + }
> +}
> +
> int vmx_create_vmcs(struct vcpu *v)
> {
> struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index 8cc1122..939d097 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -499,6 +499,15 @@ static inline int vmx_add_host_load_msr(u32 msr)
>
> DECLARE_PER_CPU(bool_t, vmxon);
>
> +int vmx_vcpu_pml_enabled(struct vcpu *v);
> +int vmx_vcpu_enable_pml(struct vcpu *v);
> +void vmx_vcpu_disable_pml(struct vcpu *v);
> +void vmx_vcpu_flush_pml_buffer(struct vcpu *v);
> +int vmx_domain_pml_enabled(struct domain *d);
> +int vmx_domain_enable_pml(struct domain *d);
> +void vmx_domain_disable_pml(struct domain *d);
> +void vmx_domain_flush_pml_buffers(struct domain *d);
> +
> #endif /* ASM_X86_HVM_VMX_VMCS_H__ */
>
> /*
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel