
[Xen-devel] [PATCH 2/2] x86/shadow: Move shadow pagetable fields into struct shadow_vcpu



The vTLB pointer/lock and the last_write* booleans are used exclusively by the
shadow pagetable code.  Move them from struct paging_vcpu to struct shadow_vcpu,
so they are omitted entirely from builds without shadow paging support.

While changing the qualified names of these variables, drop an unnecessary NULL
check before freeing the vTLB (xfree() already tolerates NULL), and move
allocation of the vTLB from sh_update_paging_modes() to shadow_vcpu_init(),
where it more logically belongs.
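
For readers skimming the diff: the dropped check only guarded xfree() against a
NULL pointer, but Xen's xfree(), like standard free(), is a no-op when handed
NULL, so the teardown path can free unconditionally.  A minimal before/after
sketch of the pattern (illustrative only, not part of the patch):

    /* Before: explicit NULL check around the free. */
    if ( v->arch.paging.vtlb )
    {
        xfree(v->arch.paging.vtlb);
        v->arch.paging.vtlb = NULL;
    }

    /* After: xfree(NULL) is harmless, so free and clear unconditionally. */
    xfree(v->arch.paging.shadow.vtlb);
    v->arch.paging.shadow.vtlb = NULL;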

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c  | 35 ++++++++++++-----------------------
 xen/arch/x86/mm/shadow/multi.c   | 22 +++++++++++-----------
 xen/arch/x86/mm/shadow/private.h | 24 ++++++++++++------------
 xen/include/asm-x86/domain.h     | 18 ++++++++++--------
 4 files changed, 45 insertions(+), 54 deletions(-)

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index aa0b8f0..1075d56 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -95,6 +95,14 @@ int shadow_vcpu_init(struct vcpu *v)
     }
 #endif
 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
+    /* Allocate a virtual TLB for this vcpu. */
+    v->arch.paging.shadow.vtlb = xzalloc_array(struct shadow_vtlb, VTLB_ENTRIES);
+    if ( !v->arch.paging.shadow.vtlb )
+        return -ENOMEM;
+    spin_lock_init(&v->arch.paging.shadow.vtlb_lock);
+#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
+
     v->arch.paging.mode = is_pv_vcpu(v) ?
                           &SHADOW_INTERNAL_NAME(sh_paging_mode, 4) :
                           &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
@@ -1459,7 +1467,7 @@ void shadow_free(struct domain *d, mfn_t smfn)
                 v->arch.paging.shadow.last_writeable_pte_smfn = 0;
 #endif
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
-            v->arch.paging.last_write_emul_ok = 0;
+            v->arch.paging.shadow.last_write_emul_ok = 0;
 #endif
         }
 #endif
@@ -1680,7 +1688,7 @@ static mfn_t emulate_gva_to_mfn(struct vcpu *v, unsigned long vaddr,
     mfn = page_to_mfn(page);
     ASSERT(mfn_valid(mfn));
 
-    v->arch.paging.last_write_was_pt = !!sh_mfn_is_a_page_table(mfn);
+    v->arch.paging.shadow.last_write_was_pt = !!sh_mfn_is_a_page_table(mfn);
     /*
      * Note shadow cannot page out or unshare this mfn, so the map won't
      * disappear. Otherwise, caller must hold onto page until done.
@@ -2864,22 +2872,6 @@ static void sh_update_paging_modes(struct vcpu *v)
 
     ASSERT(paging_locked_by_me(d));
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
-    /* Make sure this vcpu has a virtual TLB array allocated */
-    if ( unlikely(!v->arch.paging.vtlb) )
-    {
-        v->arch.paging.vtlb = xzalloc_array(struct shadow_vtlb, VTLB_ENTRIES);
-        if ( unlikely(!v->arch.paging.vtlb) )
-        {
-            SHADOW_ERROR("Could not allocate vTLB space for dom %u vcpu %u\n",
-                         d->domain_id, v->vcpu_id);
-            domain_crash(v->domain);
-            return;
-        }
-        spin_lock_init(&v->arch.paging.vtlb_lock);
-    }
-#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
-
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     if ( mfn_eq(v->arch.paging.shadow.oos_snapshot[0], INVALID_MFN) )
     {
@@ -3206,11 +3198,8 @@ void shadow_teardown(struct domain *d, bool *preempted)
     for_each_vcpu(d, v)
     {
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
-        if ( v->arch.paging.vtlb )
-        {
-            xfree(v->arch.paging.vtlb);
-            v->arch.paging.vtlb = NULL;
-        }
+        xfree(v->arch.paging.shadow.vtlb);
+        v->arch.paging.shadow.vtlb = NULL;
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index d4090d7..20db60f 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2881,7 +2881,7 @@ static int sh_page_fault(struct vcpu *v,
      * it's highly likely to reach same emulation action for this frame.
      * Then try to emulate early to avoid lock aquisition.
      */
-    if ( v->arch.paging.last_write_emul_ok
+    if ( v->arch.paging.shadow.last_write_emul_ok
          && v->arch.paging.shadow.last_emulated_frame == (va >> PAGE_SHIFT) )
     {
         /* check whether error code is 3, or else fall back to normal path
@@ -2898,7 +2898,7 @@ static int sh_page_fault(struct vcpu *v,
             if ( mfn_valid(gmfn) && mfn_is_out_of_sync(gmfn) )
             {
                 fast_emul = 0;
-                v->arch.paging.last_write_emul_ok = 0;
+                v->arch.paging.shadow.last_write_emul_ok = 0;
                 goto page_fault_slow_path;
             }
 #endif /* OOS */
@@ -2907,7 +2907,7 @@ static int sh_page_fault(struct vcpu *v,
             goto early_emulation;
         }
         else
-            v->arch.paging.last_write_emul_ok = 0;
+            v->arch.paging.shadow.last_write_emul_ok = 0;
     }
 #endif
 
@@ -3344,7 +3344,7 @@ static int sh_page_fault(struct vcpu *v,
             if ( fast_emul )
             {
                 perfc_incr(shadow_fault_fast_emulate_fail);
-                v->arch.paging.last_write_emul_ok = 0;
+                v->arch.paging.shadow.last_write_emul_ok = 0;
             }
 #endif
             gdprintk(XENLOG_DEBUG, "write to pagetable during event "
@@ -3399,7 +3399,7 @@ static int sh_page_fault(struct vcpu *v,
         if ( fast_emul )
         {
             perfc_incr(shadow_fault_fast_emulate_fail);
-            v->arch.paging.last_write_emul_ok = 0;
+            v->arch.paging.shadow.last_write_emul_ok = 0;
         }
 #endif
         SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n",
@@ -3429,11 +3429,11 @@ static int sh_page_fault(struct vcpu *v,
         {
             v->arch.paging.shadow.last_emulated_frame = va >> PAGE_SHIFT;
             v->arch.paging.shadow.last_emulated_mfn = mfn_x(gmfn);
-            v->arch.paging.last_write_emul_ok = 1;
+            v->arch.paging.shadow.last_write_emul_ok = 1;
         }
     }
     else if ( fast_emul )
-        v->arch.paging.last_write_emul_ok = 0;
+        v->arch.paging.shadow.last_write_emul_ok = 0;
 #endif
 
     if ( emul_ctxt.ctxt.retire.singlestep )
@@ -3452,7 +3452,7 @@ static int sh_page_fault(struct vcpu *v,
         for ( i = 0 ; i < 4 ; i++ )
         {
             shadow_continue_emulation(&emul_ctxt, regs);
-            v->arch.paging.last_write_was_pt = 0;
+            v->arch.paging.shadow.last_write_was_pt = 0;
             r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
 
             /*
@@ -3463,7 +3463,7 @@ static int sh_page_fault(struct vcpu *v,
             if ( r == X86EMUL_OKAY && !emul_ctxt.ctxt.retire.raw )
             {
                 emulation_count++;
-                if ( v->arch.paging.last_write_was_pt )
+                if ( v->arch.paging.shadow.last_write_was_pt )
                 {
                     perfc_incr(shadow_em_ex_pt);
                     TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EMULATION_2ND_PT_WRITTEN);
@@ -3539,7 +3539,7 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
 #endif
 
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
-    v->arch.paging.last_write_emul_ok = 0;
+    v->arch.paging.shadow.last_write_emul_ok = 0;
 #endif
 
     /* First check that we can safely read the shadow l2e.  SMP/PAE linux can
@@ -4232,7 +4232,7 @@ sh_update_cr3(struct vcpu *v, int do_locking)
 #endif
 
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
-    v->arch.paging.last_write_emul_ok = 0;
+    v->arch.paging.shadow.last_write_emul_ok = 0;
 #endif
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index f0b0ed4..5649e81 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -768,9 +768,9 @@ struct shadow_vtlb {
 /* Call whenever the guest flushes hit actual TLB */
 static inline void vtlb_flush(struct vcpu *v)
 {
-    spin_lock(&v->arch.paging.vtlb_lock);
-    memset(v->arch.paging.vtlb, 0, VTLB_ENTRIES * sizeof (struct shadow_vtlb));
-    spin_unlock(&v->arch.paging.vtlb_lock);
+    spin_lock(&v->arch.paging.shadow.vtlb_lock);
+    memset(v->arch.paging.shadow.vtlb, 0, VTLB_ENTRIES * sizeof(struct shadow_vtlb));
+    spin_unlock(&v->arch.paging.shadow.vtlb_lock);
 }
 
 static inline int vtlb_hash(unsigned long page_number)
@@ -784,9 +784,9 @@ static inline void vtlb_insert(struct vcpu *v, unsigned long page,
 {
     struct shadow_vtlb entry =
         { .page_number = page, .frame_number = frame, .pfec = pfec };
-    spin_lock(&v->arch.paging.vtlb_lock);
-    v->arch.paging.vtlb[vtlb_hash(page)] = entry;
-    spin_unlock(&v->arch.paging.vtlb_lock);
+    spin_lock(&v->arch.paging.shadow.vtlb_lock);
+    v->arch.paging.shadow.vtlb[vtlb_hash(page)] = entry;
+    spin_unlock(&v->arch.paging.shadow.vtlb_lock);
 }
 
 /* Look a translation up in the vTLB.  Returns INVALID_GFN if not found. */
@@ -797,15 +797,15 @@ static inline unsigned long vtlb_lookup(struct vcpu *v,
     unsigned long frame_number = gfn_x(INVALID_GFN);
     int i = vtlb_hash(page_number);
 
-    spin_lock(&v->arch.paging.vtlb_lock);
-    if ( v->arch.paging.vtlb[i].pfec != 0
-         && v->arch.paging.vtlb[i].page_number == page_number
+    spin_lock(&v->arch.paging.shadow.vtlb_lock);
+    if ( v->arch.paging.shadow.vtlb[i].pfec != 0
+         && v->arch.paging.shadow.vtlb[i].page_number == page_number
          /* Any successful walk that had at least these pfec bits is OK */
-         && (v->arch.paging.vtlb[i].pfec & pfec) == pfec )
+         && (v->arch.paging.shadow.vtlb[i].pfec & pfec) == pfec )
     {
-        frame_number = v->arch.paging.vtlb[i].frame_number;
+        frame_number = v->arch.paging.shadow.vtlb[i].frame_number;
     }
-    spin_unlock(&v->arch.paging.vtlb_lock);
+    spin_unlock(&v->arch.paging.shadow.vtlb_lock);
     return frame_number;
 }
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index e6c7e13..3e7d791 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -147,7 +147,16 @@ struct shadow_vcpu {
         unsigned long off[SHADOW_OOS_FIXUPS];
     } oos_fixup[SHADOW_OOS_PAGES];
 
-    bool_t pagetable_dying;
+    /* Translated guest: virtual TLB */
+    struct shadow_vtlb *vtlb;
+    spinlock_t          vtlb_lock;
+
+    /* HVM guest: last emulate was to a pagetable */
+    bool last_write_was_pt;
+    /* HVM guest: last write emulation succeeds */
+    bool last_write_emul_ok;
+
+    bool pagetable_dying;
 #endif
 };
 
@@ -222,13 +231,6 @@ struct paging_vcpu {
     const struct paging_mode *mode;
     /* Nested Virtualization: paging mode of nested guest */
     const struct paging_mode *nestedmode;
-    /* HVM guest: last emulate was to a pagetable */
-    unsigned int last_write_was_pt:1;
-    /* HVM guest: last write emulation succeeds */
-    unsigned int last_write_emul_ok:1;
-    /* Translated guest: virtual TLB */
-    struct shadow_vtlb *vtlb;
-    spinlock_t          vtlb_lock;
 
     /* paging support extension */
     struct shadow_vcpu shadow;
-- 
2.1.4

