
[Xen-devel] [PATCH 07/20] x86/shadow: Alter sh_type_{is_pinnable,has_up_pointer}() to take a domain



Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c  |    7 ++++---
 xen/arch/x86/mm/shadow/multi.c   |   10 +++++-----
 xen/arch/x86/mm/shadow/private.h |   18 ++++++++++--------
 3 files changed, 19 insertions(+), 16 deletions(-)
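
For illustration (not part of the patch): both predicates only ever consult
the domain, so they now take it directly, and call sites that have only a
vcpu in scope hoist a local once.  A minimal sketch of the resulting calling
pattern; example_caller() is a hypothetical stand-in for the call sites
touched below:

    /* New predicate signatures, as introduced in private.h below. */
    static inline int sh_type_is_pinnable(struct domain *d, unsigned int t);
    static inline int sh_type_has_up_pointer(struct domain *d, unsigned int t);

    /* Hypothetical caller, showing the hoisting pattern used in the hunks. */
    static void example_caller(struct vcpu *v, unsigned int t)
    {
        struct domain *d = v->domain;    /* hoisted once at function entry */

        if ( sh_type_is_pinnable(d, t) )
            /* e.g. keep the shadow on d->arch.paging.shadow.pinned_shadows */;
        else if ( sh_type_has_up_pointer(d, t) )
            /* e.g. follow sp->up back to the referencing shadow entry */;
    }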

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 80174df..bdb19fb 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2472,6 +2472,7 @@ static int sh_remove_shadow_via_pointer(struct vcpu *v, mfn_t smfn)
 /* Follow this shadow's up-pointer, if it has one, and remove the reference
  * found there.  Returns 1 if that was the only reference to this shadow */
 {
+    struct domain *d = v->domain;
     struct page_info *sp = mfn_to_page(smfn);
     mfn_t pmfn;
     void *vaddr;
@@ -2479,7 +2480,7 @@ static int sh_remove_shadow_via_pointer(struct vcpu *v, mfn_t smfn)
 
     ASSERT(sp->u.sh.type > 0);
     ASSERT(sp->u.sh.type < SH_type_max_shadow);
-    ASSERT(sh_type_has_up_pointer(v, sp->u.sh.type));
+    ASSERT(sh_type_has_up_pointer(d, sp->u.sh.type));
 
     if (sp->up == 0) return 0;
     pmfn = _mfn(sp->up >> PAGE_SHIFT);
@@ -2616,9 +2617,9 @@ void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
                      mfn_x(gmfn), (uint32_t)pg->shadow_flags, t);       \
         break;                                                          \
     }                                                                   \
-    if ( sh_type_is_pinnable(v, t) )                                    \
+    if ( sh_type_is_pinnable(d, t) )                                    \
         sh_unpin(v, smfn);                                              \
-    else if ( sh_type_has_up_pointer(v, t) )                            \
+    else if ( sh_type_has_up_pointer(d, t) )                            \
         sh_remove_shadow_via_pointer(v, smfn);                          \
     if( !fast                                                           \
         && (pg->count_info & PGC_page_table)                            \
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 154274f..ea3b520 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -903,7 +903,7 @@ static int shadow_set_l4e(struct vcpu *v,
         mfn_t sl3mfn = shadow_l4e_get_mfn(new_sl4e);
         ok = sh_get_ref(v, sl3mfn, paddr);
         /* Are we pinning l3 shadows to handle weird linux behaviour? */
-        if ( sh_type_is_pinnable(v, SH_type_l3_64_shadow) )
+        if ( sh_type_is_pinnable(d, SH_type_l3_64_shadow) )
             ok |= sh_pin(v, sl3mfn);
         if ( !ok )
         {
@@ -1501,7 +1501,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
     SHADOW_DEBUG(MAKE_SHADOW, "(%05lx, %u)=>%05lx\n",
                   mfn_x(gmfn), shadow_type, mfn_x(smfn));
 
-    if ( sh_type_has_up_pointer(v, shadow_type) )
+    if ( sh_type_has_up_pointer(d, shadow_type) )
         /* Lower-level shadow, not yet linked from a higher level */
         mfn_to_page(smfn)->up = 0;
 
@@ -2367,7 +2367,7 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
     struct page_info *sp;
     mfn_t smfn;
 
-    if ( !sh_type_has_up_pointer(v, SH_type_l1_shadow) )
+    if ( !sh_type_has_up_pointer(d, SH_type_l1_shadow) )
         return 0;
 
     smfn = get_shadow_status(d, gl1mfn, SH_type_l1_shadow);
@@ -2383,7 +2383,7 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
 #if (SHADOW_PAGING_LEVELS == 4)
     /* up to l3 */
     sp = mfn_to_page(smfn);
-    ASSERT(sh_type_has_up_pointer(v, SH_type_l2_shadow));
+    ASSERT(sh_type_has_up_pointer(d, SH_type_l2_shadow));
     if ( sp->u.sh.count != 1 || !sp->up )
         return 0;
     smfn = _mfn(sp->up >> PAGE_SHIFT);
@@ -2392,7 +2392,7 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
     /* up to l4 */
     sp = mfn_to_page(smfn);
     if ( sp->u.sh.count != 1
-         || !sh_type_has_up_pointer(v, SH_type_l3_64_shadow) || !sp->up )
+         || !sh_type_has_up_pointer(d, SH_type_l3_64_shadow) || !sp->up )
         return 0;
     smfn = _mfn(sp->up >> PAGE_SHIFT);
     ASSERT(mfn_valid(smfn));
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index df1dd8c..8c06775 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -195,7 +195,7 @@ extern void shadow_audit_tables(struct vcpu *v);
  * What counts as a pinnable shadow?
  */
 
-static inline int sh_type_is_pinnable(struct vcpu *v, unsigned int t)
+static inline int sh_type_is_pinnable(struct domain *d, unsigned int t)
 {
     /* Top-level shadow types in each mode can be pinned, so that they
      * persist even when not currently in use in a guest CR3 */
@@ -211,7 +211,7 @@ static inline int sh_type_is_pinnable(struct vcpu *v, unsigned int t)
      * page.  When we're shadowing those kernels, we have to pin l3
      * shadows so they don't just evaporate on every context switch.
      * For all other guests, we'd rather use the up-pointer field in l3s. */
-    if ( unlikely((v->domain->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL)
+    if ( unlikely((d->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL)
                   && t == SH_type_l3_64_shadow) )
         return 1;
 #endif
@@ -220,7 +220,7 @@ static inline int sh_type_is_pinnable(struct vcpu *v, unsigned int t)
     return 0;
 }
 
-static inline int sh_type_has_up_pointer(struct vcpu *v, unsigned int t)
+static inline int sh_type_has_up_pointer(struct domain *d, unsigned int t)
 {
     /* Multi-page shadows don't have up-pointers */
     if ( t == SH_type_l1_32_shadow
@@ -228,7 +228,7 @@ static inline int sh_type_has_up_pointer(struct vcpu *v, unsigned int t)
          || t == SH_type_l2_32_shadow )
         return 0;
     /* Pinnable shadows don't have up-pointers either */
-    return !sh_type_is_pinnable(v, t);
+    return !sh_type_is_pinnable(d, t);
 }
 
 static inline void sh_terminate_list(struct page_list_head *tmp_list)
@@ -540,6 +540,7 @@ void sh_destroy_shadow(struct vcpu *v, mfn_t smfn);
  * Returns 0 for failure, 1 for success. */
 static inline int sh_get_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
 {
+    struct domain *d = v->domain;
     u32 x, nx;
     struct page_info *sp = mfn_to_page(smfn);
 
@@ -561,7 +562,7 @@ static inline int sh_get_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
 
     /* We remember the first shadow entry that points to each shadow. */
     if ( entry_pa != 0
-         && sh_type_has_up_pointer(v, sp->u.sh.type)
+         && sh_type_has_up_pointer(d, sp->u.sh.type)
          && sp->up == 0 )
         sp->up = entry_pa;
 
@@ -573,6 +574,7 @@ static inline int sh_get_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
  * physical address of the shadow entry that held this reference. */
 static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
 {
+    struct domain *d = v->domain;
     u32 x, nx;
     struct page_info *sp = mfn_to_page(smfn);
 
@@ -582,7 +584,7 @@ static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
 
     /* If this is the entry in the up-pointer, remove it */
     if ( entry_pa != 0
-         && sh_type_has_up_pointer(v, sp->u.sh.type)
+         && sh_type_has_up_pointer(d, sp->u.sh.type)
          && sp->up == entry_pa )
         sp->up = 0;
 
@@ -656,7 +658,7 @@ static inline int sh_pin(struct vcpu *v, mfn_t smfn)
     sp[0] = mfn_to_page(smfn);
     pages = shadow_size(sp[0]->u.sh.type);
     already_pinned = sp[0]->u.sh.pinned;
-    ASSERT(sh_type_is_pinnable(v, sp[0]->u.sh.type));
+    ASSERT(sh_type_is_pinnable(d, sp[0]->u.sh.type));
     ASSERT(sp[0]->u.sh.head);
 
     pin_list = &d->arch.paging.shadow.pinned_shadows;
@@ -704,7 +706,7 @@ static inline void sh_unpin(struct vcpu *v, mfn_t smfn)
     ASSERT(mfn_valid(smfn));
     sp = mfn_to_page(smfn);
     head_type = sp->u.sh.type;
-    ASSERT(sh_type_is_pinnable(v, sp->u.sh.type));
+    ASSERT(sh_type_is_pinnable(d, sp->u.sh.type));
     ASSERT(sp->u.sh.head);
 
     if ( !sp->u.sh.pinned )
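
For reference (not part of the patch): the up-pointer lifecycle that the
private.h hunks above maintain, distilled as a sketch using the patch's own
names, with the surrounding refcount logic elided:

    /* sh_get_ref(): remember the first shadow entry that references this
     * shadow -- only for types that keep an up-pointer at all. */
    if ( entry_pa != 0
         && sh_type_has_up_pointer(d, sp->u.sh.type)
         && sp->up == 0 )
        sp->up = entry_pa;

    /* sh_put_ref(): clear it when that same entry drops its reference. */
    if ( entry_pa != 0
         && sh_type_has_up_pointer(d, sp->u.sh.type)
         && sp->up == entry_pa )
        sp->up = 0;

    /* sh_remove_shadow_via_pointer(): recover the MFN of the shadow page
     * holding the referencing entry from the stored physical address. */
    pmfn = _mfn(sp->up >> PAGE_SHIFT);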
-- 
1.7.10.4

