[PATCH v3] memory: XENMEM_add_to_physmap (almost) wrapping checks
Determining that behavior is correct (i.e. results in failure) for a
passed in GFN equaling INVALID_GFN is non-trivial. Make this quite a bit
more obvious by checking input in generic code - both for singular
requests to not match the value and for range ones to not pass / wrap
through it.

For Arm similarly make more obvious that no wrapping of MFNs passed for
XENMAPSPACE_dev_mmio and thus to map_dev_mmio_region() can occur: Drop
the "nr" parameter of the function to avoid future callers appearing
which might not themselves check for wrapping. Otherwise the respective
ASSERT() in rangeset_contains_range() could trigger.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v3: Rename function to map_dev_mmio_page().
v2: Add comment to BUILD_BUG_ON(). Avoid transiently #define-ing _gfn()
    (by way of new prereq patch).

--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1479,7 +1479,7 @@ int xenmem_add_to_physmap_one(
         break;
     }
     case XENMAPSPACE_dev_mmio:
-        rc = map_dev_mmio_region(d, gfn, 1, _mfn(idx));
+        rc = map_dev_mmio_page(d, gfn, _mfn(idx));
         return rc;
 
     default:
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1355,21 +1355,18 @@ int unmap_mmio_regions(struct domain *d,
     return p2m_remove_mapping(d, start_gfn, nr, mfn);
 }
 
-int map_dev_mmio_region(struct domain *d,
-                        gfn_t gfn,
-                        unsigned long nr,
-                        mfn_t mfn)
+int map_dev_mmio_page(struct domain *d, gfn_t gfn, mfn_t mfn)
 {
     int res;
 
-    if ( !(nr && iomem_access_permitted(d, mfn_x(mfn), mfn_x(mfn) + nr - 1)) )
+    if ( !iomem_access_permitted(d, mfn_x(mfn), mfn_x(mfn)) )
         return 0;
 
-    res = p2m_insert_mapping(d, gfn, nr, mfn, p2m_mmio_direct_c);
+    res = p2m_insert_mapping(d, gfn, 1, mfn, p2m_mmio_direct_c);
     if ( res < 0 )
     {
-        printk(XENLOG_G_ERR "Unable to map MFNs [%#"PRI_mfn" - %#"PRI_mfn" in Dom%d\n",
-               mfn_x(mfn), mfn_x(mfn) + nr - 1, d->domain_id);
+        printk(XENLOG_G_ERR "Unable to map MFN %#"PRI_mfn" in %pd\n",
+               mfn_x(mfn), d);
         return res;
     }
 
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -4157,7 +4157,10 @@ int gnttab_map_frame(struct domain *d, u
     bool status = false;
 
     if ( gfn_eq(gfn, INVALID_GFN) )
+    {
+        ASSERT_UNREACHABLE();
         return -EINVAL;
+    }
 
     grant_write_lock(gt);
 
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -832,6 +832,9 @@ int xenmem_add_to_physmap(struct domain
             return -EACCES;
     }
 
+    if ( gfn_eq(_gfn(xatp->gpfn), INVALID_GFN) )
+        return -EINVAL;
+
     if ( xatp->space == XENMAPSPACE_gmfn_foreign )
         extra.foreign_domid = DOMID_INVALID;
 
@@ -842,6 +845,18 @@ int xenmem_add_to_physmap(struct domain
     if ( xatp->size < start )
         return -EILSEQ;
 
+    if ( xatp->gpfn + xatp->size < xatp->gpfn ||
+         xatp->idx + xatp->size < xatp->idx )
+    {
+        /*
+         * Make sure INVALID_GFN is the highest representable value, i.e.
+         * guaranteeing that it won't fall in the middle of the
+         * [xatp->gpfn, xatp->gpfn + xatp->size) range checked above.
+         */
+        BUILD_BUG_ON(INVALID_GFN_RAW + 1);
+        return -EOVERFLOW;
+    }
+
     xatp->idx += start;
     xatp->gpfn += start;
     xatp->size -= start;
@@ -962,6 +977,9 @@ static int xenmem_add_to_physmap_batch(s
                                      extent, 1)) )
             return -EFAULT;
 
+        if ( gfn_eq(_gfn(gpfn), INVALID_GFN) )
+            return -EINVAL;
+
         rc = xenmem_add_to_physmap_one(d, xatpb->space, extra, idx,
                                        _gfn(gpfn));
--- a/xen/arch/arm/include/asm/p2m.h
+++ b/xen/arch/arm/include/asm/p2m.h
@@ -295,10 +295,7 @@ int unmap_regions_p2mt(struct domain *d,
                        unsigned long nr,
                        mfn_t mfn);
 
-int map_dev_mmio_region(struct domain *d,
-                        gfn_t gfn,
-                        unsigned long nr,
-                        mfn_t mfn);
+int map_dev_mmio_page(struct domain *d, gfn_t gfn, mfn_t mfn);
 
 int p2m_insert_mapping(struct domain *d, gfn_t start_gfn,
                        unsigned long nr, mfn_t mfn, p2m_type_t t);
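The overflow check added to xenmem_add_to_physmap() above may be easier
to follow with a standalone illustration. The sketch below is not Xen
code: gfn_range_ok() is a hypothetical helper, and the 64-bit frame
width plus the ~0 value of INVALID_GFN_RAW are assumptions consistent
with what the BUILD_BUG_ON() in the patch asserts. It shows why the
singular INVALID_GFN check together with the unsigned wrap check is
sufficient once INVALID_GFN is known to be the highest representable
value.

/* Standalone illustration only - not Xen code. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define INVALID_GFN_RAW (~(uint64_t)0)

/*
 * Mirror of the generic checks: reject gpfn == INVALID_GFN and any
 * [gpfn, gpfn + size) range whose end wraps around zero.
 */
static bool gfn_range_ok(uint64_t gpfn, uint64_t size)
{
    if ( gpfn == INVALID_GFN_RAW )
        return false;
    if ( gpfn + size < gpfn )
        return false;
    /*
     * With INVALID_GFN_RAW being the maximum value, a non-wrapping
     * range starting below it cannot pass through it, so no
     * per-element check is needed.
     */
    return true;
}

int main(void)
{
    assert(gfn_range_ok(0x1000, 0x10));
    assert(!gfn_range_ok(INVALID_GFN_RAW, 1));     /* starts at INVALID_GFN */
    assert(!gfn_range_ok(INVALID_GFN_RAW - 1, 2)); /* would cover INVALID_GFN */
    return 0;
}

The same reasoning is behind dropping the "nr" parameter: with
map_dev_mmio_page() handling exactly one MFN, the range handed to
iomem_access_permitted() trivially cannot wrap, so the ASSERT() in
rangeset_contains_range() mentioned in the description cannot trigger.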