[PATCH v4 4/7] arm/mpu: Provide access to the MPU region from the C code
Implement some utility functions in order to access the MPU regions from
the C world.

Signed-off-by: Luca Fancellu <luca.fancellu@xxxxxxx>
---
v4 changes:
 - moved back PRBAR0_EL2/PRLAR0_EL2 to mm.c and protect them with
   CONFIG_ARM_64, changed comments, fixed typos and code style
 - Add PRBAR_EL2_(n) definition, to be overridden by Arm32
 - protect prepare_selector, read_protection_region and
   write_protection_region with CONFIG_ARM_64 to ensure compilation on both
   arm32 and arm64; Arm32 will modify that later while introducing the arm32
   bits.
v3 changes:
 - Moved PRBAR0_EL2/PRLAR0_EL2 to arm64 specific code
 - Modified prepare_selector() to be easily made a NOP for Arm32, which can
   address up to 32 regions without changing the selector, 32 being also its
   maximum number of MPU regions.
---
---
 xen/arch/arm/include/asm/mpu.h    |   1 +
 xen/arch/arm/include/asm/mpu/mm.h |  34 +++++++++
 xen/arch/arm/mpu/mm.c             | 117 ++++++++++++++++++++++++++++++
 3 files changed, 152 insertions(+)

diff --git a/xen/arch/arm/include/asm/mpu.h b/xen/arch/arm/include/asm/mpu.h
index 1368b2eb990f..40a86140b6cc 100644
--- a/xen/arch/arm/include/asm/mpu.h
+++ b/xen/arch/arm/include/asm/mpu.h
@@ -17,6 +17,7 @@
 #define MPU_REGION_SHIFT  6
 #define MPU_REGION_ALIGN  (_AC(1, UL) << MPU_REGION_SHIFT)
 #define MPU_REGION_MASK   (~(MPU_REGION_ALIGN - 1))
+#define MPU_REGION_RES0   (0xFFFFULL << 48)
 
 #define NUM_MPU_REGIONS_SHIFT 8
 #define NUM_MPU_REGIONS       (_AC(1, UL) << NUM_MPU_REGIONS_SHIFT)
diff --git a/xen/arch/arm/include/asm/mpu/mm.h b/xen/arch/arm/include/asm/mpu/mm.h
index 28339259c458..e2235e568e81 100644
--- a/xen/arch/arm/include/asm/mpu/mm.h
+++ b/xen/arch/arm/include/asm/mpu/mm.h
@@ -41,6 +41,40 @@ static inline struct page_info *virt_to_page(const void *v)
     return mfn_to_page(mfn);
 }
 
+/* Utility function to be used whenever MPU regions are modified */
+static inline void context_sync_mpu(void)
+{
+    /*
+     * ARM DDI 0600B.a, C1.7.1
+     * Writes to MPU registers are only guaranteed to be visible following a
+     * Context synchronization event and DSB operation.
+     */
+    dsb(sy);
+    isb();
+}
+
+/*
+ * The following API requires context_sync_mpu() after being used to modify MPU
+ * regions:
+ *  - write_protection_region
+ */
+
+/*
+ * Reads the MPU region with index 'sel' from the HW.
+ *
+ * @pr_read: mpu protection region returned by the read op.
+ * @sel: mpu protection region selector
+ */
+extern void read_protection_region(pr_t *pr_read, uint8_t sel);
+
+/*
+ * Writes the MPU region with index 'sel' to the HW.
+ *
+ * @pr_write: const mpu protection region passed through the write op.
+ * @sel: mpu protection region selector
+ */
+extern void write_protection_region(const pr_t *pr_write, uint8_t sel);
+
 #endif /* __ARM_MPU_MM_H__ */
 
 /*
diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
index 9eab09ff2044..40ccf99adc94 100644
--- a/xen/arch/arm/mpu/mm.c
+++ b/xen/arch/arm/mpu/mm.c
@@ -8,6 +8,8 @@
 #include <xen/sizes.h>
 #include <xen/types.h>
 #include <asm/mpu.h>
+#include <asm/mpu/mm.h>
+#include <asm/sysregs.h>
 
 struct page_info *frame_table;
 
@@ -26,6 +28,35 @@ DECLARE_BITMAP(xen_mpumap_mask, MAX_MPU_REGION_NR) \
 
 /* EL2 Xen MPU memory region mapping table. */
 pr_t __section(".data.page_aligned") xen_mpumap[MAX_MPU_REGION_NR];
 
+#ifdef CONFIG_ARM_64
+/*
+ * The following are needed for the case generators GENERATE_WRITE_PR_REG_CASE
+ * and GENERATE_READ_PR_REG_CASE with num==0
+ */
+#define PRBAR0_EL2 PRBAR_EL2
+#define PRLAR0_EL2 PRLAR_EL2
+
+#define PRBAR_EL2_(n) PRBAR##n##_EL2
+#define PRLAR_EL2_(n) PRLAR##n##_EL2
+
+#endif
+
+#define GENERATE_WRITE_PR_REG_CASE(num, pr)                                 \
+    case num:                                                               \
+    {                                                                       \
+        WRITE_SYSREG(pr->prbar.bits & ~MPU_REGION_RES0, PRBAR_EL2_(num));   \
+        WRITE_SYSREG(pr->prlar.bits & ~MPU_REGION_RES0, PRLAR_EL2_(num));   \
+        break;                                                              \
+    }
+
+#define GENERATE_READ_PR_REG_CASE(num, pr)                                  \
+    case num:                                                               \
+    {                                                                       \
+        pr->prbar.bits = READ_SYSREG(PRBAR_EL2_(num));                      \
+        pr->prlar.bits = READ_SYSREG(PRLAR_EL2_(num));                      \
+        break;                                                              \
+    }
+
 static void __init __maybe_unused build_assertions(void)
 {
     /*
@@ -36,6 +67,92 @@ static void __init __maybe_unused build_assertions(void)
     BUILD_BUG_ON(PAGE_SIZE != SZ_4K);
 }
 
+#ifdef CONFIG_ARM_64
+/*
+ * Armv8-R supports both direct and indirect access to the MPU regions through
+ * registers. Indirect access involves changing the MPU region selector,
+ * issuing an isb barrier and then accessing the selected region through
+ * specific registers; direct access instead uses specific registers that
+ * point to a specific MPU region, in some cases without changing the
+ * selector and therefore without issuing barriers.
+ * For Arm64 the PR{B,L}AR_ELx (for n=0) and PR{B,L}AR<n>_ELx, n=1..15, are used
+ * for the direct access to the regions selected by PRSELR_EL2.REGION<7:4>:n, so
+ * 16 regions can be directly accessed when the selector is a multiple of 16,
+ * giving access to all the supported memory regions.
+ */
+static void prepare_selector(uint8_t *sel)
+{
+    uint8_t cur_sel = *sel;
+
+    /*
+     * {read,write}_protection_region work using the direct access to the 0..15
+     * regions, so in order to save the isb() overhead, change PRSELR_EL2
+     * only when needed, i.e. when the upper 4 bits of the selector change.
+     */
+    cur_sel &= 0xF0U;
+    if ( READ_SYSREG(PRSELR_EL2) != cur_sel )
+    {
+        WRITE_SYSREG(cur_sel, PRSELR_EL2);
+        isb();
+    }
+    *sel = *sel & 0xFU;
+}
+
+void read_protection_region(pr_t *pr_read, uint8_t sel)
+{
+    prepare_selector(&sel);
+
+    switch ( sel )
+    {
+        GENERATE_READ_PR_REG_CASE(0, pr_read);
+        GENERATE_READ_PR_REG_CASE(1, pr_read);
+        GENERATE_READ_PR_REG_CASE(2, pr_read);
+        GENERATE_READ_PR_REG_CASE(3, pr_read);
+        GENERATE_READ_PR_REG_CASE(4, pr_read);
+        GENERATE_READ_PR_REG_CASE(5, pr_read);
+        GENERATE_READ_PR_REG_CASE(6, pr_read);
+        GENERATE_READ_PR_REG_CASE(7, pr_read);
+        GENERATE_READ_PR_REG_CASE(8, pr_read);
+        GENERATE_READ_PR_REG_CASE(9, pr_read);
+        GENERATE_READ_PR_REG_CASE(10, pr_read);
+        GENERATE_READ_PR_REG_CASE(11, pr_read);
+        GENERATE_READ_PR_REG_CASE(12, pr_read);
+        GENERATE_READ_PR_REG_CASE(13, pr_read);
+        GENERATE_READ_PR_REG_CASE(14, pr_read);
+        GENERATE_READ_PR_REG_CASE(15, pr_read);
+    default:
+        BUG(); /* Can't happen */
+    }
+}
+
+void write_protection_region(const pr_t *pr_write, uint8_t sel)
+{
+    prepare_selector(&sel);
+
+    switch ( sel )
+    {
+        GENERATE_WRITE_PR_REG_CASE(0, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(1, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(2, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(3, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(4, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(5, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(6, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(7, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(8, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(9, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(10, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(11, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(12, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(13, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(14, pr_write);
+        GENERATE_WRITE_PR_REG_CASE(15, pr_write);
+    default:
+        BUG(); /* Can't happen */
+    }
+}
+#endif
+
 void __init setup_mm(void)
 {
     BUG_ON("unimplemented");
-- 
2.34.1
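
Editor's usage note (not part of the patch): a minimal sketch of how a caller
inside Xen might combine the new helpers. The function name
update_region_example and the idea of tweaking a region in place are made up
for illustration; pr_t, the .bits fields, read/write_protection_region() and
context_sync_mpu() come from this patch.

    #include <asm/mpu/mm.h>

    /* Hypothetical helper: re-program one EL2 MPU region after tweaking it. */
    static void update_region_example(uint8_t sel)
    {
        pr_t region;

        /* Read the current configuration of the selected MPU region. */
        read_protection_region(&region, sel);

        /* ... adjust region.prbar.bits / region.prlar.bits as needed ... */

        /* Write it back; the helper masks the RES0 bits [63:48]. */
        write_protection_region(&region, sel);

        /* Per the mm.h comment, the write is only guaranteed visible after this. */
        context_sync_mpu();
    }

Leaving the synchronization to the caller presumably allows a batch of
write_protection_region() calls to share a single context_sync_mpu().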