[Xen-devel] [PATCH v7 14/34] ARM: vGICv3: handle virtual LPI pending and property tables

Allow a guest to provide the address and size for the memory regions it has
reserved for the GICv3 pending and property tables.
We sanitise the various fields of the respective redistributor registers.
The MMIO read and write accesses are protected by locks: to prevent the
property or pending table address from changing while a redistributor is
live, and to protect the non-atomic vgic_reg64_extract() function on the
MMIO read side.

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
---
 xen/arch/arm/vgic-v3.c       | 158 +++++++++++++++++++++++++++++++++++++++----
 xen/include/asm-arm/domain.h |   5 ++
 2 files changed, 151 insertions(+), 12 deletions(-)

diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 6bc3d76..fd6a777 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -19,12 +19,14 @@
  */

 #include <xen/bitops.h>
+#include <xen/domain_page.h>
 #include <xen/lib.h>
 #include <xen/init.h>
 #include <xen/softirq.h>
 #include <xen/irq.h>
 #include <xen/sched.h>
 #include <xen/sizes.h>
+#include <xen/vmap.h>
 #include <asm/current.h>
 #include <asm/mmio.h>
 #include <asm/gic_v3_defs.h>
@@ -230,12 +232,25 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
         goto read_reserved;

     case VREG64(GICR_PROPBASER):
-        /* LPI's not implemented */
-        goto read_as_zero_64;
+        if ( !v->domain->arch.vgic.has_its )
+            goto read_as_zero_64;
+        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+
+        vgic_lock(v);
+        *r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
+        vgic_unlock(v);
+        return 1;

     case VREG64(GICR_PENDBASER):
-        /* LPI's not implemented */
-        goto read_as_zero_64;
+        if ( !v->domain->arch.vgic.has_its )
+            goto read_as_zero_64;
+        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+
+        spin_lock(&v->arch.vgic.lock);
+        *r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);
+        *r &= ~GICR_PENDBASER_PTZ;       /* WO, reads as 0 */
+        spin_unlock(&v->arch.vgic.lock);
+        return 1;

     case 0x0080:
         goto read_reserved;
@@ -332,11 +347,95 @@ read_unknown:
     return 1;
 }

+static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
+                                    int field_shift,
+                                    uint64_t (*sanitise_fn)(uint64_t))
+{
+    uint64_t field = (reg & field_mask) >> field_shift;
+
+    field = sanitise_fn(field) << field_shift;
+
+    return (reg & ~field_mask) | field;
+}
+
+/* We want to avoid outer shareable. */
+static uint64_t vgic_sanitise_shareability(uint64_t field)
+{
+    switch ( field )
+    {
+    case GIC_BASER_OuterShareable:
+        return GIC_BASER_InnerShareable;
+    default:
+        return field;
+    }
+}
+
+/* Avoid any inner non-cacheable mapping. */
+static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
+{
+    switch ( field )
+    {
+    case GIC_BASER_CACHE_nCnB:
+    case GIC_BASER_CACHE_nC:
+        return GIC_BASER_CACHE_RaWb;
+    default:
+        return field;
+    }
+}
+
+/* Non-cacheable or same-as-inner are OK. */
+static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
+{
+    switch ( field )
+    {
+    case GIC_BASER_CACHE_SameAsInner:
+    case GIC_BASER_CACHE_nC:
+        return field;
+    default:
+        return GIC_BASER_CACHE_nC;
+    }
+}
+
+static uint64_t sanitize_propbaser(uint64_t reg)
+{
+    reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
+                              GICR_PROPBASER_SHAREABILITY_SHIFT,
+                              vgic_sanitise_shareability);
+    reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
+                              GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
+                              vgic_sanitise_inner_cacheability);
+    reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
+                              GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
+                              vgic_sanitise_outer_cacheability);
+
+    reg &= ~GICR_PROPBASER_RES0_MASK;
+
+    return reg;
+}
+
+static uint64_t sanitize_pendbaser(uint64_t reg)
+{
+    reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
+                              GICR_PENDBASER_SHAREABILITY_SHIFT,
+                              vgic_sanitise_shareability);
+    reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
+                              GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
+                              vgic_sanitise_inner_cacheability);
+    reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
+                              GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
+                              vgic_sanitise_outer_cacheability);
+
+    reg &= ~GICR_PENDBASER_RES0_MASK;
+
+    return reg;
+}
+
 static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
                                           uint32_t gicr_reg,
                                           register_t r)
 {
     struct hsr_dabt dabt = info->dabt;
+    uint64_t reg;

     switch ( gicr_reg )
     {
@@ -367,36 +466,71 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
         goto write_impl_defined;

     case VREG64(GICR_SETLPIR):
-        /* LPI is not implemented */
+        /* LPIs without an ITS are not implemented */
         goto write_ignore_64;

     case VREG64(GICR_CLRLPIR):
-        /* LPI is not implemented */
+        /* LPIs without an ITS are not implemented */
         goto write_ignore_64;

     case 0x0050:
         goto write_reserved;

     case VREG64(GICR_PROPBASER):
-        /* LPI is not implemented */
-        goto write_ignore_64;
+        if ( !v->domain->arch.vgic.has_its )
+            goto write_ignore_64;
+        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+
+        vgic_lock(v);
+
+        /*
+         * Writing PROPBASER with any redistributor having LPIs enabled
+         * is UNPREDICTABLE.
+         */
+        if ( !(v->domain->arch.vgic.rdists_enabled) )
+        {
+            reg = v->domain->arch.vgic.rdist_propbase;
+            vgic_reg64_update(&reg, r, info);
+            reg = sanitize_propbaser(reg);
+            v->domain->arch.vgic.rdist_propbase = reg;
+        }
+
+        vgic_unlock(v);
+
+        return 1;

     case VREG64(GICR_PENDBASER):
-        /* LPI is not implemented */
-        goto write_ignore_64;
+        if ( !v->domain->arch.vgic.has_its )
+            goto write_ignore_64;
+        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+
+        spin_lock(&v->arch.vgic.lock);
+
+        /* Writing PENDBASER with LPIs enabled is UNPREDICTABLE. */
+        if ( !(v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED) )
+        {
+            reg = v->arch.vgic.rdist_pendbase;
+            vgic_reg64_update(&reg, r, info);
+            reg = sanitize_pendbaser(reg);
+            v->arch.vgic.rdist_pendbase = reg;
+        }
+
+        spin_unlock(&v->arch.vgic.lock);
+
+        return 1;

     case 0x0080:
         goto write_reserved;

     case VREG64(GICR_INVLPIR):
-        /* LPI is not implemented */
+        /* LPIs without an ITS are not implemented */
         goto write_ignore_64;

     case 0x00A8:
         goto write_reserved;

     case VREG64(GICR_INVALLR):
-        /* LPI is not implemented */
+        /* LPIs without an ITS are not implemented */
         goto write_ignore_64;

     case 0x00B8:
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 583d491..365a4ef 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -109,10 +109,14 @@ struct arch_domain
         } *rdist_regions;
         int nr_regions;                     /* Number of rdist regions */
         uint32_t rdist_stride;              /* Re-Distributor stride */
+        unsigned long int nr_lpis;
+        uint64_t rdist_propbase;
         struct rb_root its_devices;         /* Devices mapped to an ITS */
         spinlock_t its_devices_lock;        /* Protects the its_devices tree */
         struct radix_tree_root pend_lpi_tree; /* Stores struct pending_irq's */
         rwlock_t pend_lpi_tree_lock;        /* Protects the pend_lpi_tree */
+        bool rdists_enabled;                /* Is any redistributor enabled? */
+        bool has_its;
 #endif
     } vgic;

@@ -259,6 +263,7 @@ struct arch_vcpu

         /* GICv3: redistributor base and flags for this vCPU */
         paddr_t rdist_base;
+        uint64_t rdist_pendbase;
 #define VGIC_V3_RDIST_LAST      (1 << 0)    /* last vCPU of the rdist */
 #define VGIC_V3_LPIS_ENABLED    (1 << 1)
         uint8_t flags;
-- 
2.8.2
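For readers outside the Xen tree, the register-sanitisation pattern used above can be summarised in a small standalone sketch. The FAKE_* masks, shifts and encodings below are invented placeholders for illustration only and do not correspond to the real GICR_PROPBASER layout; the helper merely mirrors the shape of vgic_sanitise_field() in the patch.

/*
 * Standalone sketch of the field-sanitising pattern. All FAKE_* values
 * are made up for illustration; they are NOT the real register layout.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_SHAREABILITY_SHIFT  10
#define FAKE_SHAREABILITY_MASK   (0x3ULL << FAKE_SHAREABILITY_SHIFT)
#define FAKE_OUTER_SHAREABLE     2ULL
#define FAKE_INNER_SHAREABLE     1ULL
#define FAKE_RES0_MASK           0xFFFF000000000007ULL

/*
 * Extract a field, run it through a fix-up function and merge the
 * (possibly replaced) encoding back into the register value.
 */
static uint64_t sanitise_field(uint64_t reg, uint64_t mask, int shift,
                               uint64_t (*fn)(uint64_t))
{
    uint64_t field = (reg & mask) >> shift;

    field = fn(field) << shift;

    return (reg & ~mask) | field;
}

/* Downgrade "outer shareable" to "inner shareable", keep other values. */
static uint64_t sanitise_shareability(uint64_t field)
{
    return field == FAKE_OUTER_SHAREABLE ? FAKE_INNER_SHAREABLE : field;
}

int main(void)
{
    /* A guest-written value with outer shareable and some reserved bits set. */
    uint64_t guest_val = (FAKE_OUTER_SHAREABLE << FAKE_SHAREABILITY_SHIFT) |
                         0xFFFF000000001007ULL;
    uint64_t reg;

    reg = sanitise_field(guest_val, FAKE_SHAREABILITY_MASK,
                         FAKE_SHAREABILITY_SHIFT, sanitise_shareability);
    reg &= ~FAKE_RES0_MASK;            /* clear reserved bits last */

    printf("guest wrote 0x%016" PRIx64 ", stored 0x%016" PRIx64 "\n",
           guest_val, reg);
    return 0;
}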