|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v2] arm/mm: Fix resource handling in xenmem_add_to_physmap_one
The current implementation of xenmem_add_to_physmap_one() does not check
for or remove existing mappings at the target GFN before inserting new
mappings. This can lead to pages not being properly freed (until domain
destruction) when replaced.
Add proper detection and cleanup of old mappings:
* Check for existing mappings at the target GFN before insertion
* For special pages: unmap without freeing (not owned by domain)
* For MMIO mappings: reject with -EPERM (conservative choice; see the note
  after the --- marker)
* For grant mappings: reject with -EPERM (cleanup not implemented in
p2m_put_l3_page)
* For RAM and foreign mappings: properly remove via guest_remove_page()
* Optimize same-MFN remapping: detect and return early (no-op)
Fix XENMAPSPACE_dev_mmio by inlining its permission check and logic
into the main switch statement, allowing it to go through the same cleanup
path as other mapping types. This removes the early return that bypassed
cleanup. The now-unused map_dev_mmio_page() function is removed.
Foreign mappings are allowed to be removed (matching x86 behavior) because
proper cleanup via p2m_put_foreign_page() is already implemented in
p2m_put_l3_page(). Grant mappings are rejected because cleanup is not yet
implemented.
Add new helpers p2m_is_grant() and p2m_is_mmio().
Note: There are race conditions due to multiple P2M lock/unlock cycles
during the check-remove-insert sequence. These are documented and
subject to future improvements.
Signed-off-by: Michal Orzel <michal.orzel@xxxxxxx>
---
Changes in v2:
- handle all the spaces
- drop references
- add a comment about races (not handled in this attempt)
guest_remove_page() would work on MMIO, but for now I decided to take a
conservative approach and reject it.
---
xen/arch/arm/include/asm/p2m.h | 9 ++++-
xen/arch/arm/mm.c | 73 +++++++++++++++++++++++++++++++---
xen/arch/arm/p2m.c | 18 ---------
3 files changed, 74 insertions(+), 26 deletions(-)
diff --git a/xen/arch/arm/include/asm/p2m.h b/xen/arch/arm/include/asm/p2m.h
index 010ce8c9ebbd..d88f4f65c7bc 100644
--- a/xen/arch/arm/include/asm/p2m.h
+++ b/xen/arch/arm/include/asm/p2m.h
@@ -149,6 +149,11 @@ static inline p2m_type_t arch_dt_passthrough_p2m_type(void)
#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw) | \
p2m_to_mask(p2m_ram_ro))
+/* MMIO types */
+#define P2M_MMIO_TYPES (p2m_to_mask(p2m_mmio_direct_dev) | \
+ p2m_to_mask(p2m_mmio_direct_nc) | \
+ p2m_to_mask(p2m_mmio_direct_c))
+
/* Grant mapping types, which map to a real frame in another VM */
#define P2M_GRANT_TYPES (p2m_to_mask(p2m_grant_map_rw) | \
p2m_to_mask(p2m_grant_map_ro))
@@ -159,6 +164,8 @@ static inline p2m_type_t arch_dt_passthrough_p2m_type(void)
/* Useful predicates */
#define p2m_is_ram(_t) (p2m_to_mask(_t) & P2M_RAM_TYPES)
+#define p2m_is_mmio(_t) (p2m_to_mask(_t) & P2M_MMIO_TYPES)
+#define p2m_is_grant(_t) (p2m_to_mask(_t) & P2M_GRANT_TYPES)
#define p2m_is_foreign(_t) (p2m_to_mask(_t) & P2M_FOREIGN_TYPES)
#define p2m_is_any_ram(_t) (p2m_to_mask(_t) & \
(P2M_RAM_TYPES | P2M_GRANT_TYPES | \
@@ -318,8 +325,6 @@ int unmap_regions_p2mt(struct domain *d,
unsigned long nr,
mfn_t mfn);
-int map_dev_mmio_page(struct domain *d, gfn_t gfn, mfn_t mfn);
-
int p2m_insert_mapping(struct domain *d, gfn_t start_gfn, unsigned long nr,
mfn_t mfn, p2m_type_t t);
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 6df8b616e464..0cee65d5ab5a 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -11,6 +11,7 @@
#include <xen/domain_page.h>
#include <xen/grant_table.h>
#include <xen/guest_access.h>
+#include <xen/iocap.h>
#include <xen/mm.h>
#include <xen/static-memory.h>
#include <xen/static-shmem.h>
@@ -166,10 +167,11 @@ int xenmem_add_to_physmap_one(
unsigned long idx,
gfn_t gfn)
{
- mfn_t mfn = INVALID_MFN;
+ mfn_t mfn = INVALID_MFN, mfn_old;
int rc;
- p2m_type_t t;
+ p2m_type_t t, p2mt_old;
struct page_info *page = NULL;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
switch ( space )
{
@@ -237,13 +239,73 @@ int xenmem_add_to_physmap_one(
break;
}
case XENMAPSPACE_dev_mmio:
- rc = map_dev_mmio_page(d, gfn, _mfn(idx));
- return rc;
+ if ( !iomem_access_permitted(d, idx, idx) )
+ return 0;
+
+ mfn = _mfn(idx);
+ t = p2m_mmio_direct_c;
+ break;
default:
return -ENOSYS;
}
+ /*
+ * Release the old page reference if it was present.
+ *
+ * TODO: There are race conditions in this code due to multiple lock/unlock
+ * cycles:
+ *
+ * Race #1: Between checking the old mapping and removing it, another CPU
+ * could replace the mapping. We would then remove the wrong mapping.
+ *
+ * Race #2: Between removing the old mapping and inserting the new one,
+ * another CPU could insert a different mapping. We would then silently
+ * overwrite it.
+ *
+ * For now, we accept these races as they require concurrent
+ * xenmem_add_to_physmap_one operations on the same GFN, which is not a
+ * normal usage pattern.
+ */
+ p2m_read_lock(p2m);
+ mfn_old = p2m_get_entry(p2m, gfn, &p2mt_old, NULL, NULL, NULL);
+ p2m_read_unlock(p2m);
+
+ if ( mfn_valid(mfn_old) && !mfn_eq(mfn, mfn_old) )
+ {
+ if ( is_special_page(mfn_to_page(mfn_old)) )
+ {
+ /* Just unmap, don't free */
+ p2m_write_lock(p2m);
+ rc = p2m_set_entry(p2m, gfn, 1, INVALID_MFN,
+ p2m_invalid, p2m->default_access);
+ p2m_write_unlock(p2m);
+ if ( rc )
+ goto out;
+ }
+ else if ( p2m_is_mmio(p2mt_old) || p2m_is_grant(p2mt_old) )
+ {
+ /* Reject MMIO and grant replacements */
+ rc = -EPERM;
+ goto out;
+ }
+ else
+ {
+ /* Allow RAM and foreign - both have proper cleanup */
+ rc = guest_remove_page(d, gfn_x(gfn));
+ if ( rc )
+ goto out;
+ }
+ }
+ else if ( mfn_valid(mfn_old) )
+ {
+ /* Mapping already exists. Drop the references taken above */
+ if ( page != NULL )
+ put_page(page);
+
+ return 0;
+ }
+
/*
* Map at new location. Here we need to map xenheap RAM page differently
* because we need to store the valid GFN and make sure that nothing was
@@ -255,8 +317,6 @@ int xenmem_add_to_physmap_one(
rc = guest_physmap_add_entry(d, gfn, mfn, 0, t);
else
{
- struct p2m_domain *p2m = p2m_get_hostp2m(d);
-
p2m_write_lock(p2m);
if ( gfn_eq(page_get_xenheap_gfn(mfn_to_page(mfn)), INVALID_GFN) )
{
@@ -276,6 +336,7 @@ int xenmem_add_to_physmap_one(
p2m_write_unlock(p2m);
}
+ out:
/*
* For XENMAPSPACE_gmfn_foreign if we failed to add the mapping, we need
* to drop the reference we took earlier. In all other cases we need to
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index fb03978a19af..c91568926e4c 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -200,24 +200,6 @@ int unmap_mmio_regions(struct domain *d,
return p2m_remove_mapping(d, start_gfn, nr, mfn);
}
-int map_dev_mmio_page(struct domain *d, gfn_t gfn, mfn_t mfn)
-{
- int res;
-
- if ( !iomem_access_permitted(d, mfn_x(mfn), mfn_x(mfn)) )
- return 0;
-
- res = p2m_insert_mapping(d, gfn, 1, mfn, p2m_mmio_direct_c);
- if ( res < 0 )
- {
- printk(XENLOG_G_ERR "Unable to map MFN %#"PRI_mfn" in %pd\n",
- mfn_x(mfn), d);
- return res;
- }
-
- return 0;
-}
-
int guest_physmap_add_entry(struct domain *d,
gfn_t gfn,
mfn_t mfn,
--
2.43.0
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |