
[Xen-devel] [PATCH v1 11/14] x86/np2m: implement sharing of np2m between vCPUs



Modify p2m_get_nestedp2m() to allow sharing an np2m between multiple
vCPUs that have the same np2m_base (the L1 np2m_base value in VMCx12).
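
In outline, the new lookup tries three things in this order (a sketch
only, not the exact code below; try_assign() is a made-up shorthand
for the lock/nvcpu_flush()/assign_np2m()/unlock sequence in the real
function):

    uint64_t np2m_base = nhvm_vcpu_p2m_base(v) & ~0xfffull; /* masked L1 base */

    /* 1. Reuse the vCPU's current np2m if its base still matches. */
    if ( nv->nv_p2m && nv->nv_p2m->np2m_base == np2m_base )
        return try_assign(v, nv->nv_p2m);

    /* 2. Share an np2m that another vCPU already uses for this base. */
    for ( i = 0; i < MAX_NESTEDP2M; i++ )
        if ( d->arch.nested_p2m[i]->np2m_base == np2m_base )
            return try_assign(v, d->arch.nested_p2m[i]);

    /* 3. Otherwise, recycle the least-recently-used np2m as before. */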

np2m_schedule() callbacks are added to context_switch(), and a pseudo
schedule-out is performed during vvmx's virtual_vmexit().
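
For reference, np2m_schedule() behaves roughly as sketched here (a
sketch only; the earlier patch in this series that introduces it has
the authoritative version):

    void np2m_schedule(int dir)
    {
        struct nestedvcpu *nv = &vcpu_nestedhvm(current);
        struct p2m_domain *p2m = nv->nv_p2m;

        if ( !nestedhvm_vcpu_in_guestmode(current) || !p2m )
            return;

        p2m_lock(p2m);
        if ( dir == NP2M_SCHEDLE_OUT )
            /* A descheduled vCPU needs no flush IPIs for this np2m. */
            cpumask_clear_cpu(current->processor, p2m->dirty_cpumask);
        else if ( nv->np2m_generation != p2m->np2m_generation )
            /* The np2m was flushed while this vCPU was scheduled out. */
            nv->nv_p2m = NULL;
        else
            cpumask_set_cpu(current->processor, p2m->dirty_cpumask);
        p2m_unlock(p2m);
    }

This is why virtual_vmexit() can use NP2M_SCHEDLE_OUT as a pseudo
schedule-out: L2 stops running on this pCPU, so the pCPU can be
dropped from the np2m's dirty_cpumask without waiting for a real
context switch.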

Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
 xen/arch/x86/domain.c       |  2 ++
 xen/arch/x86/hvm/vmx/vvmx.c |  4 ++++
 xen/arch/x86/mm/p2m.c       | 29 +++++++++++++++++++++++++++--
 3 files changed, 33 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index dbddc536d3..c8c26dad4e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1647,6 +1647,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
     {
         _update_runstate_area(prev);
         vpmu_switch_from(prev);
+        np2m_schedule(NP2M_SCHEDLE_OUT);
     }
 
     if ( is_hvm_domain(prevd) && !list_empty(&prev->arch.hvm_vcpu.tm_list) )
@@ -1695,6 +1696,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
 
         /* Must be done with interrupts enabled */
         vpmu_switch_to(next);
+        np2m_schedule(NP2M_SCHEDLE_IN);
     }
 
     /* Ensure that the vcpu has an up-to-date time base. */
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 26ce349c76..49733af62b 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1201,6 +1201,7 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
 
     /* Setup virtual ETP for L2 guest*/
     if ( nestedhvm_paging_mode_hap(v) )
+        /* This will set up the initial np2m for the nested vCPU */
         __vmwrite(EPT_POINTER, get_shadow_eptp(v));
     else
         __vmwrite(EPT_POINTER, get_host_eptp(v));
@@ -1367,6 +1368,9 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
          !(v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
         shadow_to_vvmcs_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
 
+    /* This will clear the current pCPU's bit in p2m->dirty_cpumask */
+    np2m_schedule(NP2M_SCHEDLE_OUT);
+
     vmx_vmcs_switch(v->arch.hvm_vmx.vmcs_pa, nvcpu->nv_n1vmcx_pa);
 
     nestedhvm_vcpu_exit_guestmode(v);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 15dedef33b..d6a474fa20 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1825,6 +1825,7 @@ p2m_get_nestedp2m_locked(struct vcpu *v)
     struct domain *d = v->domain;
     struct p2m_domain *p2m;
     uint64_t np2m_base = nhvm_vcpu_p2m_base(v);
+    unsigned int i;
 
     /* Mask out low bits; this avoids collisions with P2M_BASE_EADDR */
     np2m_base &= ~(0xfffull);
@@ -1838,10 +1839,34 @@ p2m_get_nestedp2m_locked(struct vcpu *v)
     if ( p2m ) 
     {
         p2m_lock(p2m);
-        if ( p2m->np2m_base == np2m_base || p2m->np2m_base == P2M_BASE_EADDR )
+        if ( p2m->np2m_base == np2m_base )
         {
-            if ( p2m->np2m_base == P2M_BASE_EADDR )
+            /* Check if np2m was flushed just before the lock */
+            if ( nv->np2m_generation != p2m->np2m_generation )
                 nvcpu_flush(v);
+            /* np2m is up-to-date */
+            p2m->np2m_base = np2m_base;
+            assign_np2m(v, p2m);
+            nestedp2m_unlock(d);
+
+            return p2m;
+        }
+        else if ( p2m->np2m_base != P2M_BASE_EADDR )
+        {
+            /* vCPU is switching from some other valid np2m */
+            cpumask_clear_cpu(v->processor, p2m->dirty_cpumask);
+        }
+        p2m_unlock(p2m);
+    }
+
+    /* Share an np2m if possible */
+    for ( i = 0; i < MAX_NESTEDP2M; i++ )
+    {
+        p2m = d->arch.nested_p2m[i];
+        p2m_lock(p2m);
+        if ( p2m->np2m_base == np2m_base )
+        {
+            nvcpu_flush(v);
             p2m->np2m_base = np2m_base;
             assign_np2m(v, p2m);
             nestedp2m_unlock(d);
-- 
2.11.0

