[Xen-devel] [PATCH v2 2/3] x86/mm: clean up SHARED_M2P{,_ENTRY} uses
Stop open-coding SHARED_M2P() and drop a pointless use of it from
paging_mfn_is_dirty() (!VALID_M2P() is a superset of SHARED_M2P()) and
another one from free_page_type() (prior assertions render this
redundant).
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
---
v2: Re-do free_page_type() change.
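
(Note, not part of the patch: a minimal standalone sketch of why "!VALID_M2P()
is a superset of SHARED_M2P()". The sentinel/macro definitions below are
reproduced from memory of xen/include/asm-x86/mm.h, so verify them against the
tree; BITS_PER_LONG is redefined locally only to keep the example
self-contained.)

#include <assert.h>
#include <limits.h>

/* M2P sentinels and helpers, as recalled from xen/include/asm-x86/mm.h. */
#define BITS_PER_LONG       (sizeof(unsigned long) * CHAR_BIT)
#define INVALID_M2P_ENTRY   (~0UL)
#define SHARED_M2P_ENTRY    (~0UL - 1UL)
#define VALID_M2P(_e)       (!((_e) & (1UL << (BITS_PER_LONG - 1))))
#define SHARED_M2P(_e)      ((_e) == SHARED_M2P_ENTRY)

int main(void)
{
    /* Both sentinels have the top bit set, so neither passes VALID_M2P(). */
    assert(!VALID_M2P(INVALID_M2P_ENTRY));
    assert(!VALID_M2P(SHARED_M2P_ENTRY));

    /*
     * Hence SHARED_M2P(x) implies !VALID_M2P(x): the !VALID_M2P() check kept
     * in paging_mfn_is_dirty() already rejects shared entries, which is why
     * the separate SHARED_M2P() test can be dropped there.
     */
    assert(SHARED_M2P(SHARED_M2P_ENTRY) && !VALID_M2P(SHARED_M2P_ENTRY));

    /* An ordinary PFN is valid and not shared. */
    assert(VALID_M2P(0x1234UL) && !SHARED_M2P(0x1234UL));

    return 0;
}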
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2369,9 +2369,7 @@ int free_page_type(struct page_info *pag
         ASSERT(!shadow_mode_refcounts(owner));
 
         gmfn = mfn_to_gmfn(owner, mfn_x(page_to_mfn(page)));
-        ASSERT(VALID_M2P(gmfn));
-        /* Page sharing not supported for shadowed domains */
-        if(!SHARED_M2P(gmfn))
+        if ( VALID_M2P(gmfn) )
             shadow_remove_all_shadows(owner, _mfn(gmfn));
     }
 
@@ -4166,7 +4164,7 @@ int xenmem_add_to_physmap_one(
 
     /* Unmap from old location, if any. */
     old_gpfn = get_gpfn_from_mfn(mfn_x(mfn));
-    ASSERT( old_gpfn != SHARED_M2P_ENTRY );
+    ASSERT(!SHARED_M2P(old_gpfn));
     if ( (space == XENMAPSPACE_gmfn || space == XENMAPSPACE_gmfn_range) &&
          old_gpfn != gfn )
     {
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -409,7 +409,7 @@ static struct page_info* mem_sharing_loo
             unsigned long t = read_atomic(&page->u.inuse.type_info);
             ASSERT((t & PGT_type_mask) == PGT_shared_page);
             ASSERT((t & PGT_count_mask) >= 2);
-            ASSERT(get_gpfn_from_mfn(mfn) == SHARED_M2P_ENTRY);
+            ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn)));
             return page;
         }
     }
@@ -469,7 +469,7 @@ static int audit(void)
        }
 
        /* Check the m2p entry */
-       if ( get_gpfn_from_mfn(mfn_x(mfn)) != SHARED_M2P_ENTRY )
+       if ( !SHARED_M2P(get_gpfn_from_mfn(mfn_x(mfn))) )
        {
           MEM_SHARING_DEBUG("mfn %lx shared, but wrong m2p entry (%lx)!\n",
                             mfn_x(mfn), get_gpfn_from_mfn(mfn_x(mfn)));
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2583,7 +2583,7 @@ void audit_p2m(struct domain *d,
             continue;
         }
 
-        if ( gfn == SHARED_M2P_ENTRY )
+        if ( SHARED_M2P(gfn) )
         {
             P2M_PRINTK("shared mfn (%lx) on domain page list!\n",
                        mfn);
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -1059,8 +1059,7 @@ long p2m_pt_audit_p2m(struct p2m_domain
                        {
                            m2pfn = get_gpfn_from_mfn(mfn+i1);
                            /* Allow shared M2Ps */
-                            if ( (m2pfn != (gfn + i1)) &&
-                                 (m2pfn != SHARED_M2P_ENTRY) )
+                            if ( (m2pfn != (gfn + i1)) && !SHARED_M2P(m2pfn) )
                            {
                                pmbad++;
                                P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -369,8 +369,8 @@ int paging_mfn_is_dirty(struct domain *d
 
     /* We /really/ mean PFN here, even for non-translated guests. */
     pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));
-    /* Shared pages are always read-only; invalid pages can't be dirty. */
-    if ( unlikely(SHARED_M2P(pfn_x(pfn)) || !VALID_M2P(pfn_x(pfn))) )
+    /* Invalid pages can't be dirty. */
+    if ( unlikely(!VALID_M2P(pfn_x(pfn))) )
         return 0;
 
     mfn = d->arch.paging.log_dirty.top;