[PATCH 1/2] x86/ept: move vmx_vcpu_flush_pml_buffer() to p2m-ept.c

The function mostly deals with p2m type changes and dirty bitmap updates
rather than with VMX machinery, so move it from vmcs.c to p2m-ept.c and
rename it to ept_vcpu_flush_pml_buffer().  Adjust the callers, and move
the declaration from vmcs.h to vmx.h.

No functional change intended.
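For review context, a stand-alone sketch of the index handling being moved
(not part of the applied change; NR_PML_ENTRIES is 512, i.e. one page of
8-byte GPA entries, and process() below is a hypothetical stand-in for the
type change plus dirty marking the real code performs): hardware decrements
GUEST_PML_INDEX on each logged write, wrapping to 2^16-1 once the buffer
is full, so the flush first normalizes the index and then walks the valid
tail of the buffer:

    /*
     * E.g. after 3 logged writes the index is 508 and entries 509..511
     * hold the dirtied GPAs; after 512 writes it has wrapped to 0xffff
     * and all of entries 0..511 are valid.
     */
    if ( pml_idx >= NR_PML_ENTRIES )    /* 0xffff: buffer completely full */
        pml_idx = 0;                    /* walk every entry */
    else
        pml_idx++;                      /* pml_idx itself is the free slot */

    for ( ; pml_idx < NR_PML_ENTRIES; pml_idx++ )
        process(pml_buf[pml_idx] >> PAGE_SHIFT);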

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmcs.c             | 59 +------------------------
 xen/arch/x86/hvm/vmx/vmx.c              |  2 +-
 xen/arch/x86/include/asm/hvm/vmx/vmcs.h |  1 -
 xen/arch/x86/include/asm/hvm/vmx/vmx.h  |  1 +
 xen/arch/x86/mm/p2m-ept.c               | 55 +++++++++++++++++++++++
 5 files changed, 59 insertions(+), 59 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index e7fca4bf33db..ab8b1c87ec0f 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1670,7 +1670,7 @@ void vmx_vcpu_disable_pml(struct vcpu *v)
         return;
 
     /* Make sure we don't lose any logged GPAs. */
-    vmx_vcpu_flush_pml_buffer(v);
+    ept_vcpu_flush_pml_buffer(v);
 
     vmx_vmcs_enter(v);
 
@@ -1684,61 +1684,6 @@ void vmx_vcpu_disable_pml(struct vcpu *v)
     v->arch.hvm.vmx.pml_pg = NULL;
 }
 
-void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
-{
-    uint64_t *pml_buf;
-    unsigned long pml_idx;
-
-    ASSERT((v == current) || (!vcpu_runnable(v) && !v->is_running));
-    ASSERT(vmx_vcpu_pml_enabled(v));
-
-    vmx_vmcs_enter(v);
-
-    __vmread(GUEST_PML_INDEX, &pml_idx);
-
-    /* Do nothing if PML buffer is empty. */
-    if ( pml_idx == (NR_PML_ENTRIES - 1) )
-        goto out;
-
-    pml_buf = __map_domain_page(v->arch.hvm.vmx.pml_pg);
-
-    /*
-     * PML index can be either 2^16-1 (buffer is full), or 0 ~ NR_PML_ENTRIES-1
-     * (buffer is not full), and in latter case PML index always points to next
-     * available entity.
-     */
-    if ( pml_idx >= NR_PML_ENTRIES )
-        pml_idx = 0;
-    else
-        pml_idx++;
-
-    for ( ; pml_idx < NR_PML_ENTRIES; pml_idx++ )
-    {
-        unsigned long gfn = pml_buf[pml_idx] >> PAGE_SHIFT;
-
-        /*
-         * Need to change type from log-dirty to normal memory for logged GFN.
-         * hap_track_dirty_vram depends on it to work. And we mark all logged
-         * GFNs to be dirty, as we cannot be sure whether it's safe to ignore
-         * GFNs on which p2m_change_type_one returns failure. The failure cases
-         * are very rare, and additional cost is negligible, but a missing mark
-         * is extremely difficult to debug.
-         */
-        p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
-
-        /* HVM guest: pfn == gfn */
-        paging_mark_pfn_dirty(v->domain, _pfn(gfn));
-    }
-
-    unmap_domain_page(pml_buf);
-
-    /* Reset PML index */
-    __vmwrite(GUEST_PML_INDEX, NR_PML_ENTRIES - 1);
-
- out:
-    vmx_vmcs_exit(v);
-}
-
 bool vmx_domain_pml_enabled(const struct domain *d)
 {
     return d->arch.hvm.vmx.status & VMX_DOMAIN_PML_ENABLED;
@@ -1812,7 +1757,7 @@ void vmx_domain_flush_pml_buffers(struct domain *d)
         return;
 
     for_each_vcpu ( d, v )
-        vmx_vcpu_flush_pml_buffer(v);
+        ept_vcpu_flush_pml_buffer(v);
 }
 
 static void vmx_vcpu_update_eptp(struct vcpu *v, u64 eptp)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 218cb2c1af84..ce538668c737 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -4787,7 +4787,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
              !(idtv_info & INTR_INFO_VALID_MASK) )
             undo_nmis_unblocked_by_iret();
 
-        vmx_vcpu_flush_pml_buffer(v);
+        ept_vcpu_flush_pml_buffer(v);
         break;
 
     case EXIT_REASON_XSAVES:
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
index ff5dd66b0ad9..ecd91389302c 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
@@ -734,7 +734,6 @@ DECLARE_PER_CPU(bool, vmxon);
 bool vmx_vcpu_pml_enabled(const struct vcpu *v);
 int vmx_vcpu_enable_pml(struct vcpu *v);
 void vmx_vcpu_disable_pml(struct vcpu *v);
-void vmx_vcpu_flush_pml_buffer(struct vcpu *v);
 bool vmx_domain_pml_enabled(const struct domain *d);
 int vmx_domain_enable_pml(struct domain *d);
 void vmx_domain_disable_pml(struct domain *d);
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
index 56bea252cc5a..da04752e1752 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
@@ -491,6 +491,7 @@ void ept_walk_table(struct domain *d, unsigned long gfn);
 bool ept_handle_misconfig(uint64_t gpa);
 int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
                        unsigned int order, bool *ipat, p2m_type_t type);
+void ept_vcpu_flush_pml_buffer(struct vcpu *v);
 void setup_ept_dump(void);
 /* Locate an alternate p2m by its EPTP */
 unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp);
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 0cf6818c13f0..015911ba6c80 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1373,6 +1373,61 @@ static void cf_check ept_flush_pml_buffers(struct p2m_domain *p2m)
     vmx_domain_flush_pml_buffers(p2m->domain);
 }
 
+void ept_vcpu_flush_pml_buffer(struct vcpu *v)
+{
+    uint64_t *pml_buf;
+    unsigned long pml_idx;
+
+    ASSERT((v == current) || (!vcpu_runnable(v) && !v->is_running));
+    ASSERT(vmx_vcpu_pml_enabled(v));
+
+    vmx_vmcs_enter(v);
+
+    __vmread(GUEST_PML_INDEX, &pml_idx);
+
+    /* Do nothing if PML buffer is empty. */
+    if ( pml_idx == (NR_PML_ENTRIES - 1) )
+        goto out;
+
+    pml_buf = __map_domain_page(v->arch.hvm.vmx.pml_pg);
+
+    /*
+     * PML index can be either 2^16-1 (buffer is full), or 0 ~ NR_PML_ENTRIES-1
+     * (buffer is not full), and in latter case PML index always points to next
+     * available entity.
+     */
+    if ( pml_idx >= NR_PML_ENTRIES )
+        pml_idx = 0;
+    else
+        pml_idx++;
+
+    for ( ; pml_idx < NR_PML_ENTRIES; pml_idx++ )
+    {
+        unsigned long gfn = pml_buf[pml_idx] >> PAGE_SHIFT;
+
+        /*
+         * Need to change type from log-dirty to normal memory for logged GFN.
+         * hap_track_dirty_vram depends on it to work. And we mark all logged
+         * GFNs to be dirty, as we cannot be sure whether it's safe to ignore
+         * GFNs on which p2m_change_type_one returns failure. The failure cases
+         * are very rare, and additional cost is negligible, but a missing mark
+         * is extremely difficult to debug.
+         */
+        p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
+
+        /* HVM guest: pfn == gfn */
+        paging_mark_pfn_dirty(v->domain, _pfn(gfn));
+    }
+
+    unmap_domain_page(pml_buf);
+
+    /* Reset PML index */
+    __vmwrite(GUEST_PML_INDEX, NR_PML_ENTRIES - 1);
+
+ out:
+    vmx_vmcs_exit(v);
+}
+
 int ept_p2m_init(struct p2m_domain *p2m)
 {
     struct ept_data *ept = &p2m->ept;
-- 
2.49.0