
[Xen-devel] [PATCH 10/20] x86/shadow: Alter sh_put_ref() and shadow destroy functions to take a domain

The shadow destroy functions and sh_put_ref() used their struct vcpu
parameter only to obtain the domain.  Pass the struct domain directly
instead, and hoist v->domain in the callers which still need it.  No
functional change.

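As a purely illustrative, standalone sketch of the pattern being applied
(stand-in types, not Xen code; sh_put_ref() is reduced to a stub here):

    #include <stdio.h>

    struct domain { int id; };
    struct vcpu   { struct domain *domain; };

    /* Previously took a struct vcpu and used it only to reach the
     * domain; now takes the struct domain directly. */
    static void sh_put_ref(struct domain *d, unsigned long smfn)
    {
        printf("put ref on smfn %#lx for d%d\n", smfn, d->id);
    }

    /* Callers holding only a vcpu hoist v->domain once, as
     * sh_detach_old_tables() now does. */
    static void detach(struct vcpu *v)
    {
        struct domain *d = v->domain;

        sh_put_ref(d, 0x1000);
    }

    int main(void)
    {
        struct domain d = { .id = 1 };
        struct vcpu v = { .domain = &d };

        detach(&v);
        return 0;
    }
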
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c  |   19 +++++++++----------
 xen/arch/x86/mm/shadow/multi.c   |   30 +++++++++++++-----------------
 xen/arch/x86/mm/shadow/multi.h   |    8 ++++----
 xen/arch/x86/mm/shadow/private.h |    9 ++++-----
 4 files changed, 30 insertions(+), 36 deletions(-)

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index c6b8e6f..e2ea6cb 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2052,9 +2052,8 @@ static void hash_vcpu_foreach(struct vcpu *v, unsigned int callback_mask,
  * which will decrement refcounts appropriately and return memory to the
  * free pool. */
 
-void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
+void sh_destroy_shadow(struct domain *d, mfn_t smfn)
 {
-    struct domain *d = v->domain;
     struct page_info *sp = mfn_to_page(smfn);
     unsigned int t = sp->u.sh.type;
 
@@ -2076,36 +2075,36 @@ void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
     {
     case SH_type_l1_32_shadow:
     case SH_type_fl1_32_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 2)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 2)(d, smfn);
         break;
     case SH_type_l2_32_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 2)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 2)(d, smfn);
         break;
 
     case SH_type_l1_pae_shadow:
     case SH_type_fl1_pae_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3)(d, smfn);
         break;
     case SH_type_l2_pae_shadow:
     case SH_type_l2h_pae_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3)(d, smfn);
         break;
 
     case SH_type_l1_64_shadow:
     case SH_type_fl1_64_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4)(d, smfn);
         break;
     case SH_type_l2h_64_shadow:
         ASSERT(is_pv_32on64_domain(d));
         /* Fall through... */
     case SH_type_l2_64_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4)(d, smfn);
         break;
     case SH_type_l3_64_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, 4)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, 4)(d, smfn);
         break;
     case SH_type_l4_64_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, 4)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, 4)(d, smfn);
         break;
 
     default:
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index f2dea16..7d82d90 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -931,7 +931,7 @@ static int shadow_set_l4e(struct vcpu *v,
         {
             flags |= SHADOW_SET_FLUSH;
         }
-        sh_put_ref(v, osl3mfn, paddr);
+        sh_put_ref(d, osl3mfn, paddr);
     }
     return flags;
 }
@@ -977,7 +977,7 @@ static int shadow_set_l3e(struct vcpu *v,
         {
             flags |= SHADOW_SET_FLUSH;
         }
-        sh_put_ref(v, osl2mfn, paddr);
+        sh_put_ref(d, osl2mfn, paddr);
     }
     return flags;
 }
@@ -1063,7 +1063,7 @@ static int shadow_set_l2e(struct vcpu *v,
         {
             flags |= SHADOW_SET_FLUSH;
         }
-        sh_put_ref(v, osl1mfn, paddr);
+        sh_put_ref(d, osl1mfn, paddr);
     }
     return flags;
 }
@@ -1882,9 +1882,8 @@ static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v,
  */
 
 #if GUEST_PAGING_LEVELS >= 4
-void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
+void sh_destroy_l4_shadow(struct domain *d, mfn_t smfn)
 {
-    struct domain *d = v->domain;
     shadow_l4e_t *sl4e;
     struct page_info *sp = mfn_to_page(smfn);
     u32 t = sp->u.sh.type;
@@ -1904,7 +1903,7 @@ void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
     SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, d, {
         if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
         {
-            sh_put_ref(v, shadow_l4e_get_mfn(*sl4e),
+            sh_put_ref(d, shadow_l4e_get_mfn(*sl4e),
                        (((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
                        | ((unsigned long)sl4e & ~PAGE_MASK));
         }
@@ -1914,9 +1913,8 @@ void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
     shadow_free(d, smfn);
 }
 
-void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
+void sh_destroy_l3_shadow(struct domain *d, mfn_t smfn)
 {
-    struct domain *d = v->domain;
     shadow_l3e_t *sl3e;
     struct page_info *sp = mfn_to_page(smfn);
     u32 t = sp->u.sh.type;
@@ -1936,7 +1934,7 @@ void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
     sl3mfn = smfn;
     SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, 0, {
         if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
-            sh_put_ref(v, shadow_l3e_get_mfn(*sl3e),
+            sh_put_ref(d, shadow_l3e_get_mfn(*sl3e),
                         (((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
                         | ((unsigned long)sl3e & ~PAGE_MASK));
     });
@@ -1947,9 +1945,8 @@ void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
 #endif /* GUEST_PAGING_LEVELS >= 4 */
 
 
-void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
+void sh_destroy_l2_shadow(struct domain *d, mfn_t smfn)
 {
-    struct domain *d = v->domain;
     shadow_l2e_t *sl2e;
     struct page_info *sp = mfn_to_page(smfn);
     u32 t = sp->u.sh.type;
@@ -1974,7 +1971,7 @@ void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
     sl2mfn = smfn;
     SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, {
         if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT )
-            sh_put_ref(v, shadow_l2e_get_mfn(*sl2e),
+            sh_put_ref(d, shadow_l2e_get_mfn(*sl2e),
                         (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
                         | ((unsigned long)sl2e & ~PAGE_MASK));
     });
@@ -1983,9 +1980,8 @@ void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
     shadow_free(d, smfn);
 }
 
-void sh_destroy_l1_shadow(struct vcpu *v, mfn_t smfn)
+void sh_destroy_l1_shadow(struct domain *d, mfn_t smfn)
 {
-    struct domain *d = v->domain;
     shadow_l1e_t *sl1e;
     struct page_info *sp = mfn_to_page(smfn);
     u32 t = sp->u.sh.type;
@@ -3799,6 +3795,7 @@ sh_update_linear_entries(struct vcpu *v)
 static void
 sh_detach_old_tables(struct vcpu *v)
 {
+    struct domain *d = v->domain;
     mfn_t smfn;
     int i = 0;
 
@@ -3812,7 +3809,6 @@ sh_detach_old_tables(struct vcpu *v)
 #else
     if ( v->arch.paging.shadow.guest_vtable )
     {
-        struct domain *d = v->domain;
         if ( shadow_mode_external(d) || shadow_mode_translate(d) )
             sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
         v->arch.paging.shadow.guest_vtable = NULL;
@@ -3831,7 +3827,7 @@ sh_detach_old_tables(struct vcpu *v)
     {
         smfn = pagetable_get_mfn(v->arch.shadow_table[i]);
         if ( mfn_x(smfn) )
-            sh_put_ref(v, smfn, 0);
+            sh_put_ref(d, smfn, 0);
         v->arch.shadow_table[i] = pagetable_null();
     }
 }
@@ -3904,7 +3900,7 @@ sh_set_toplevel_shadow(struct vcpu *v,
             SHADOW_ERROR("can't re-pin %#lx\n", mfn_x(old_smfn));
             domain_crash(d);
         }
-        sh_put_ref(v, old_smfn, 0);
+        sh_put_ref(d, old_smfn, 0);
     }
 }
 
diff --git a/xen/arch/x86/mm/shadow/multi.h b/xen/arch/x86/mm/shadow/multi.h
index 7f829fd..614103d 100644
--- a/xen/arch/x86/mm/shadow/multi.h
+++ b/xen/arch/x86/mm/shadow/multi.h
@@ -39,16 +39,16 @@ SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, GUEST_LEVELS)(
 
 extern void
 SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, GUEST_LEVELS)(
-    struct vcpu *v, mfn_t smfn);
+    struct domain *d, mfn_t smfn);
 extern void
 SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, GUEST_LEVELS)(
-    struct vcpu *v, mfn_t smfn);
+    struct domain *d, mfn_t smfn);
 extern void
 SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, GUEST_LEVELS)(
-    struct vcpu *v, mfn_t smfn);
+    struct domain *d, mfn_t smfn);
 extern void
 SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, GUEST_LEVELS)(
-    struct vcpu *v, mfn_t smfn);
+    struct domain *d, mfn_t smfn);
 
 extern void
 SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, GUEST_LEVELS)
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 3820d9e..a848c94 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -532,7 +532,7 @@ sh_unmap_domain_page_global(void *p)
 /**************************************************************************/
 /* Shadow-page refcounting. */
 
-void sh_destroy_shadow(struct vcpu *v, mfn_t smfn);
+void sh_destroy_shadow(struct domain *d, mfn_t smfn);
 
 /* Increase the refcount of a shadow page.  Arguments are the mfn to refcount,
  * and the physical address of the shadow entry that holds the ref (or zero
@@ -572,9 +572,8 @@ static inline int sh_get_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
 
 /* Decrease the refcount of a shadow page.  As for get_ref, takes the
  * physical address of the shadow entry that held this reference. */
-static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
+static inline void sh_put_ref(struct domain *d, mfn_t smfn, paddr_t entry_pa)
 {
-    struct domain *d = v->domain;
     u32 x, nx;
     struct page_info *sp = mfn_to_page(smfn);
 
@@ -602,7 +601,7 @@ static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
     sp->u.sh.count = nx;
 
     if ( unlikely(nx == 0) )
-        sh_destroy_shadow(v, smfn);
+        sh_destroy_shadow(d, smfn);
 }
 
 
@@ -728,7 +727,7 @@ static inline void sh_unpin(struct vcpu *v, mfn_t smfn)
     }
     sh_terminate_list(&tmp_list);
 
-    sh_put_ref(v, smfn, 0);
+    sh_put_ref(d, smfn, 0);
 }
 
 
-- 
1.7.10.4

