[Xen-devel] [PATCH 09/10] x86/shadow: drop stray name tags from sh_guest_{map,get_eff}_l1e()



As a comment being removed here validly notes, these functions depend only
on Xen's number of page table levels, and hence their name tags never served
any useful purpose: there could only ever be one instance of each in a
single binary, even back in the x86-32 days.
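
For reference, the tags in question come from the shadow code's
SHADOW_INTERNAL_NAME() token-pasting scheme (in
xen/arch/x86/mm/shadow/private.h), which stamps the guest paging level
onto each per-level symbol.  A minimal stand-alone sketch of the idea
(an approximation for illustration, not the exact Xen definition):

    /*
     * Approximation of the name-tagging macros (the real ones live in
     * xen/arch/x86/mm/shadow/private.h): the guest paging level gets
     * pasted onto the identifier, so each instantiation of multi.c has
     * its own symbol, e.g. SHADOW_INTERNAL_NAME(sh_paging_mode, 4)
     * expands to sh_paging_mode__guest_4.
     */
    #define SHADOW_INTERNAL_NAME_(name, kind, value) \
        name ## __ ## kind ## _ ## value
    #define SHADOW_INTERNAL_NAME(name, guest_levels) \
        SHADOW_INTERNAL_NAME_(name, guest, guest_levels)

Since sh_guest_map_l1e() and sh_guest_get_eff_l1e() are only ever built
when CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS, such a per-level tag
buys them nothing.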

Further, make the inclusion of the PV-specific hook pointers conditional,
at once ensuring that PV guests can never have anything other than 4-level
mode enabled for them.

For consistency, shadow_{write,cmpxchg}_guest_entry() also get moved next
to the other PV-only actors, allowing them to become static just like the
$subject functions do.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -84,7 +84,9 @@ void shadow_vcpu_init(struct vcpu *v)
     }
 #endif
 
-    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
+    v->arch.paging.mode = is_pv_vcpu(v) ?
+                          &SHADOW_INTERNAL_NAME(sh_paging_mode, 4) :
+                          &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
 }
 
 #if SHADOW_AUDIT
@@ -1128,38 +1130,6 @@ sh_validate_guest_pt_write(struct vcpu *
     }
 }
 
-int shadow_write_guest_entry(struct vcpu *v, intpte_t *p,
-                             intpte_t new, mfn_t gmfn)
-/* Write a new value into the guest pagetable, and update the shadows
- * appropriately.  Returns 0 if we page-faulted, 1 for success. */
-{
-    int failed;
-    paging_lock(v->domain);
-    failed = __copy_to_user(p, &new, sizeof(new));
-    if ( failed != sizeof(new) )
-        sh_validate_guest_entry(v, gmfn, p, sizeof(new));
-    paging_unlock(v->domain);
-    return (failed == 0);
-}
-
-int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
-                               intpte_t *old, intpte_t new, mfn_t gmfn)
-/* Cmpxchg a new value into the guest pagetable, and update the shadows
- * appropriately. Returns 0 if we page-faulted, 1 if not.
- * N.B. caller should check the value of "old" to see if the
- * cmpxchg itself was successful. */
-{
-    int failed;
-    intpte_t t = *old;
-    paging_lock(v->domain);
-    failed = cmpxchg_user(p, t, new);
-    if ( t == *old )
-        sh_validate_guest_entry(v, gmfn, p, sizeof(new));
-    *old = t;
-    paging_unlock(v->domain);
-    return (failed == 0);
-}
-
 
 /**************************************************************************/
 /* Memory management for shadow pages. */
@@ -2782,14 +2752,7 @@ static void sh_update_paging_modes(struc
     if ( v->arch.paging.mode )
         v->arch.paging.mode->shadow.detach_old_tables(v);
 
-    if ( is_pv_domain(d) )
-    {
-        ///
-        /// PV guest
-        ///
-        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 4);
-    }
-    else
+    if ( !is_pv_domain(d) )
     {
         ///
         /// HVM guest
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -371,7 +371,7 @@ static void sh_audit_gw(struct vcpu *v, 
 
 
 #if (CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS)
-void *
+static void *
 sh_guest_map_l1e(struct vcpu *v, unsigned long addr,
                   unsigned long *gl1mfn)
 {
@@ -395,7 +395,7 @@ sh_guest_map_l1e(struct vcpu *v, unsigne
     return pl1e;
 }
 
-void
+static void
 sh_guest_get_eff_l1e(struct vcpu *v, unsigned long addr, void *eff_l1e)
 {
     walk_t gw;
@@ -408,6 +408,48 @@ sh_guest_get_eff_l1e(struct vcpu *v, uns
     (void) sh_walk_guest_tables(v, addr, &gw, PFEC_page_present);
     *(guest_l1e_t *)eff_l1e = gw.l1e;
 }
+
+/*
+ * Write a new value into the guest pagetable, and update the shadows
+ * appropriately.  Returns 0 if we page-faulted, 1 for success.
+ */
+static int
+sh_write_guest_entry(struct vcpu *v, guest_intpte_t *p,
+                     guest_intpte_t new, mfn_t gmfn)
+{
+    int failed;
+
+    paging_lock(v->domain);
+    failed = __copy_to_user(p, &new, sizeof(new));
+    if ( failed != sizeof(new) )
+        sh_validate_guest_entry(v, gmfn, p, sizeof(new));
+    paging_unlock(v->domain);
+
+    return !failed;
+}
+
+/*
+ * Cmpxchg a new value into the guest pagetable, and update the shadows
+ * appropriately. Returns 0 if we page-faulted, 1 if not.
+ * N.B. caller should check the value of "old" to see if the cmpxchg itself
+ * was successful.
+ */
+static int
+sh_cmpxchg_guest_entry(struct vcpu *v, guest_intpte_t *p,
+                       guest_intpte_t *old, guest_intpte_t new, mfn_t gmfn)
+{
+    int failed;
+    guest_intpte_t t = *old;
+
+    paging_lock(v->domain);
+    failed = cmpxchg_user(p, t, new);
+    if ( t == *old )
+        sh_validate_guest_entry(v, gmfn, p, sizeof(new));
+    *old = t;
+    paging_unlock(v->domain);
+
+    return !failed;
+}
 #endif /* CONFIG == GUEST (== SHADOW) */
 
 /**************************************************************************/
@@ -5184,10 +5226,12 @@ const struct paging_mode sh_paging_mode 
     .update_cr3                    = sh_update_cr3,
     .update_paging_modes           = shadow_update_paging_modes,
     .write_p2m_entry               = shadow_write_p2m_entry,
-    .write_guest_entry             = shadow_write_guest_entry,
-    .cmpxchg_guest_entry           = shadow_cmpxchg_guest_entry,
+#if CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS
+    .write_guest_entry             = sh_write_guest_entry,
+    .cmpxchg_guest_entry           = sh_cmpxchg_guest_entry,
     .guest_map_l1e                 = sh_guest_map_l1e,
     .guest_get_eff_l1e             = sh_guest_get_eff_l1e,
+#endif
     .guest_levels                  = GUEST_PAGING_LEVELS,
     .shadow.detach_old_tables      = sh_detach_old_tables,
     .shadow.x86_emulate_write      = sh_x86_emulate_write,
--- a/xen/arch/x86/mm/shadow/multi.h
+++ b/xen/arch/x86/mm/shadow/multi.h
@@ -98,13 +98,6 @@ SHADOW_INTERNAL_NAME(sh_audit_l4_table, 
     (struct vcpu *v, mfn_t sl4mfn, mfn_t x);
 #endif
 
-extern void *
-SHADOW_INTERNAL_NAME(sh_guest_map_l1e, GUEST_LEVELS)
-    (struct vcpu *v, unsigned long va, unsigned long *gl1mfn);
-extern void
-SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e, GUEST_LEVELS)
-    (struct vcpu *v, unsigned long va, void *eff_l1e);
-
 extern const struct paging_mode
 SHADOW_INTERNAL_NAME(sh_paging_mode, GUEST_LEVELS);
 
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -383,10 +383,6 @@ extern int sh_remove_write_access(struct
 void shadow_write_p2m_entry(struct domain *d, unsigned long gfn,
                             l1_pgentry_t *p, l1_pgentry_t new,
                             unsigned int level);
-int shadow_write_guest_entry(struct vcpu *v, intpte_t *p,
-                             intpte_t new, mfn_t gmfn);
-int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
-                               intpte_t *old, intpte_t new, mfn_t gmfn);
 
 /* Update all the things that are derived from the guest's CR0/CR3/CR4.
  * Called to initialize paging structures if the paging mode
--- a/xen/arch/x86/mm/shadow/types.h
+++ b/xen/arch/x86/mm/shadow/types.h
@@ -262,12 +262,6 @@ static inline shadow_l4e_t shadow_l4e_fr
 #define sh_rm_write_access_from_sl1p INTERNAL_NAME(sh_rm_write_access_from_sl1p)
 #endif
 
-/* The sh_guest_(map|get)_* functions depends on Xen's paging levels */
-#define sh_guest_map_l1e \
-        SHADOW_INTERNAL_NAME(sh_guest_map_l1e, CONFIG_PAGING_LEVELS)
-#define sh_guest_get_eff_l1e \
-        SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e, CONFIG_PAGING_LEVELS)
-
 /* sh_make_monitor_table depends only on the number of shadow levels */
 #define sh_make_monitor_table \
         SHADOW_SH_NAME(sh_make_monitor_table, SHADOW_PAGING_LEVELS)

