[PATCH 4/5] arm/mpu: Implement ioremap_attr for MPU
From: Luca Fancellu <luca.fancellu@xxxxxxx>

Introduce the helpers (un)map_mm_range() to allow temporarily mapping a
range of memory, and use them to implement `ioremap_attr` for MPU
systems.

Signed-off-by: Luca Fancellu <luca.fancellu@xxxxxxx>
Signed-off-by: Hari Limaye <hari.limaye@xxxxxxx>
---
 xen/arch/arm/include/asm/mpu/mm.h |  22 +++++
 xen/arch/arm/mpu/mm.c             | 150 ++++++++++++++++++++++++++++--
 2 files changed, 163 insertions(+), 9 deletions(-)

diff --git a/xen/arch/arm/include/asm/mpu/mm.h b/xen/arch/arm/include/asm/mpu/mm.h
index 56ca411af4..177550f5bd 100644
--- a/xen/arch/arm/include/asm/mpu/mm.h
+++ b/xen/arch/arm/include/asm/mpu/mm.h
@@ -106,6 +106,28 @@ int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags,
  */
 pr_t pr_of_addr(paddr_t base, paddr_t limit, unsigned int flags);
 
+/*
+ * Maps a temporary range of memory with attributes `flags`; if the range is
+ * already mapped with the same attributes (an exact or inclusive match), the
+ * existing mapping is returned. This API is intended for mappings that exist
+ * transiently for a short period between calls to this function and
+ * `unmap_mm_range`.
+ *
+ * @param start Base address of the range to map (inclusive).
+ * @param end   Limit address of the range to map (exclusive).
+ * @param flags Flags for the memory range to map.
+ * @return Pointer to start of region on success, NULL on error.
+ */
+void *map_mm_range(paddr_t start, paddr_t end, unsigned int flags);
+
+/*
+ * Unmaps a temporary range of memory if it was previously mapped by
+ * map_mm_range; otherwise it does not remove the mapping.
+ *
+ * @param start Base address of the range to unmap (inclusive).
+ */
+void unmap_mm_range(paddr_t start);
+
 /*
  * Checks whether a given memory range is present in the provided table of
  * MPU protection regions.
diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
index 38474bcfa2..cf6f95ef85 100644
--- a/xen/arch/arm/mpu/mm.c
+++ b/xen/arch/arm/mpu/mm.c
@@ -318,31 +318,39 @@ static int xen_mpumap_update_entry(paddr_t base, paddr_t limit,
     return 0;
 }
 
-int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags,
-                      bool transient)
+static bool check_mpu_mapping(paddr_t base, paddr_t limit, unsigned int flags)
 {
-    int rc;
-
     if ( flags_has_rwx(flags) )
     {
         printk("Mappings should not be both Writeable and Executable\n");
-        return -EINVAL;
+        return false;
     }
 
     if ( base >= limit )
     {
         printk("Base address %#"PRIpaddr" must be smaller than limit address %#"PRIpaddr"\n",
                base, limit);
-        return -EINVAL;
+        return false;
     }
 
     if ( !IS_ALIGNED(base, PAGE_SIZE) || !IS_ALIGNED(limit, PAGE_SIZE) )
     {
         printk("base address %#"PRIpaddr", or limit address %#"PRIpaddr" is not page aligned\n",
                base, limit);
-        return -EINVAL;
+        return false;
     }
 
+    return true;
+}
+
+int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags,
+                      bool transient)
+{
+    int rc;
+
+    if ( !check_mpu_mapping(base, limit, flags) )
+        return -EINVAL;
+
     spin_lock(&xen_mpumap_lock);
 
     rc = xen_mpumap_update_entry(base, limit, flags, transient);
@@ -453,10 +461,134 @@ void free_init_memory(void)
     BUG_ON("unimplemented");
 }
 
+static uint8_t is_mm_range_mapped(paddr_t start, paddr_t end)
+{
+    int rc;
+    uint8_t idx;
+
+    ASSERT(spin_is_locked(&xen_mpumap_lock));
+
+    rc = mpumap_contains_region(xen_mpumap, max_mpu_regions, start, end, &idx);
+    if ( rc < 0 )
+        panic("Cannot handle overlapping MPU memory protection regions\n");
+
+    /*
+     * 'idx' will be INVALID_REGION_IDX for rc == MPUMAP_REGION_NOTFOUND and
+     * it will be a proper region index when rc >= MPUMAP_REGION_FOUND.
+     */
+    return idx;
+}
+
+static bool is_mm_attr_match(pr_t *region, unsigned int attributes)
+{
+    bool ret = true;
+
+    if ( region->prbar.reg.ro != PAGE_RO_MASK(attributes) )
+    {
+        printk(XENLOG_WARNING
+               "Mismatched Access Permission attributes (%#x instead of %#x)\n",
+               region->prbar.reg.ro, PAGE_RO_MASK(attributes));
+        ret = false;
+    }
+
+    if ( region->prbar.reg.xn != PAGE_XN_MASK(attributes) )
+    {
+        printk(XENLOG_WARNING
+               "Mismatched Execute Never attributes (%#x instead of %#x)\n",
+               region->prbar.reg.xn, PAGE_XN_MASK(attributes));
+        ret = false;
+    }
+
+    if ( region->prlar.reg.ai != PAGE_AI_MASK(attributes) )
+    {
+        printk(XENLOG_WARNING
+               "Mismatched Memory Attribute Index (%#x instead of %#x)\n",
+               region->prlar.reg.ai, PAGE_AI_MASK(attributes));
+        ret = false;
+    }
+
+    return ret;
+}
+
+void *map_mm_range(paddr_t start, paddr_t end, unsigned int flags)
+{
+    paddr_t start_pg = round_pgdown(start);
+    paddr_t end_pg = round_pgup(end);
+    void *ret = NULL;
+    uint8_t idx;
+
+    if ( !check_mpu_mapping(start_pg, end_pg, flags) )
+        return NULL;
+
+    spin_lock(&xen_mpumap_lock);
+
+    idx = is_mm_range_mapped(start_pg, end_pg);
+    if ( idx != INVALID_REGION_IDX )
+    {
+        /* Already mapped with different attributes */
+        if ( !is_mm_attr_match(&xen_mpumap[idx], flags) )
+        {
+            printk(XENLOG_WARNING
+                   "Range %#"PRIpaddr"-%#"PRIpaddr" already mapped with different flags\n",
+                   start_pg, end_pg);
+            goto out;
+        }
+
+        /* Already mapped with same attributes */
+        ret = maddr_to_virt(start);
+        goto out;
+    }
+
+    if ( !xen_mpumap_update_entry(start_pg, end_pg, flags, true) )
+    {
+        context_sync_mpu();
+        ret = maddr_to_virt(start);
+    }
+
+ out:
+    spin_unlock(&xen_mpumap_lock);
+
+    return ret;
+}
+
+void unmap_mm_range(paddr_t start)
+{
+    uint8_t idx;
+
+    spin_lock(&xen_mpumap_lock);
+
+    /*
+     * Mappings created via map_mm_range are at least PAGE_SIZE. Find the idx
+     * of the MPU memory region containing `start` mapped through map_mm_range.
+     */
+    idx = is_mm_range_mapped(start, start + PAGE_SIZE);
+    if ( idx == INVALID_REGION_IDX )
+    {
+        printk(XENLOG_ERR
+               "Failed to unmap_mm_range MPU memory region at %#"PRIpaddr"\n",
+               start);
+        goto out;
+    }
+
+    /* This API is only meant to unmap transient regions */
+    if ( !region_is_transient(&xen_mpumap[idx]) )
+        goto out;
+
+    /* Disable MPU memory region and clear the associated entry in xen_mpumap */
+    disable_mpu_region_from_index(idx);
+    context_sync_mpu();
+
+ out:
+    spin_unlock(&xen_mpumap_lock);
+}
+
 void __iomem *ioremap_attr(paddr_t start, size_t len, unsigned int flags)
 {
-    BUG_ON("unimplemented");
-    return NULL;
+    if ( !map_mm_range(start, start + len, flags) )
+        return NULL;
+
+    /* Mapped or already mapped */
+    return maddr_to_virt(start);
 }
 
 /*
-- 
2.34.1
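
For illustration, a minimal sketch of how a caller might use the new helpers.
The function read_fw_table(), its base/size parameters and the choice of
PAGE_HYPERVISOR_RO are hypothetical examples, not part of this patch:

    /* Transiently map [base, base + size) read-only, use it, then drop it. */
    static int read_fw_table(paddr_t base, size_t size)
    {
        /* Attribute set here is only illustrative. */
        void *va = map_mm_range(base, base + size, PAGE_HYPERVISOR_RO);

        if ( !va )
            return -ENOMEM;

        /* ... access the table through 'va' while the MPU region exists ... */

        /* Remove the transient MPU region set up by map_mm_range(). */
        unmap_mm_range(base);

        return 0;
    }

With this in place, ioremap_attr() simply maps [start, start + len) through
map_mm_range() (or reuses an existing region with matching attributes) and
returns the direct virtual address from maddr_to_virt(start).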