[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH 17/17] x86/shadow: adjust is_pv_*() checks



To cover for "x86: correct is_pv_domain() when !CONFIG_PV" (or any other
change along those lines), we should prefer is_hvm_*(), as it may become
a build-time constant while is_pv_*() generally won't.

Also when a domain pointer is in scope, prefer is_*_domain() over
is_*_vcpu().

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -674,7 +674,7 @@ int sh_unsync(struct vcpu *v, mfn_t gmfn
     if ( pg->shadow_flags &
          ((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync)
          || sh_page_has_multiple_shadows(pg)
-         || is_pv_vcpu(v)
+         || !is_hvm_vcpu(v)
          || !v->domain->arch.paging.shadow.oos_active )
         return 0;
 
@@ -2419,7 +2419,7 @@ static void sh_update_paging_modes(struc
         v->arch.paging.mode->shadow.detach_old_tables(v);
 
 #ifdef CONFIG_HVM
-    if ( !is_pv_domain(d) )
+    if ( is_hvm_domain(d) )
     {
         const struct paging_mode *old_mode = v->arch.paging.mode;
 
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -666,8 +666,8 @@ _sh_propagate(struct vcpu *v,
     // PV guests in 64-bit mode use two different page tables for user vs
     // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
     // It is always shadowed as present...
-    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32bit_domain(d)
-         && is_pv_domain(d) )
+    if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_domain(d) &&
+         !is_pv_32bit_domain(d) )
     {
         sflags |= _PAGE_USER;
     }
@@ -1119,7 +1119,7 @@ static shadow_l2e_t * shadow_get_and_cre
         unsigned int t = SH_type_l2_shadow;
 
         /* Tag compat L2 containing hypervisor (m2p) mappings */
-        if ( is_pv_32bit_vcpu(v) &&
+        if ( is_pv_32bit_domain(d) &&
              guest_l4_table_offset(gw->va) == 0 &&
              guest_l3_table_offset(gw->va) == 3 )
             t = SH_type_l2h_shadow;
@@ -2313,7 +2313,7 @@ static int sh_page_fault(struct vcpu *v,
         return 0;
     }
 
-    cpl = is_pv_vcpu(v) ? (regs->ss & 3) : hvm_get_cpl(v);
+    cpl = is_hvm_domain(d) ? hvm_get_cpl(v) : (regs->ss & 3);
 
  rewalk:
 
@@ -3236,7 +3236,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
 #endif
 
     /* Don't do anything on an uninitialised vcpu */
-    if ( is_pv_domain(d) && !v->is_initialised )
+    if ( !is_hvm_domain(d) && !v->is_initialised )
     {
         ASSERT(v->arch.cr3 == 0);
         return;
--- a/xen/arch/x86/mm/shadow/none.c
+++ b/xen/arch/x86/mm/shadow/none.c
@@ -27,7 +27,7 @@ int shadow_domain_init(struct domain *d)
     };
 
     paging_log_dirty_init(d, &sh_none_ops);
-    return is_pv_domain(d) ? 0 : -EOPNOTSUPP;
+    return is_hvm_domain(d) ? -EOPNOTSUPP : 0;
 }
 
 static int _page_fault(struct vcpu *v, unsigned long va,




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, which monitors our
servers 24x7x365 and backs them with RackSpace's Fanatical Support®.