Re: [Xen-devel] [PATCH v5 15/30] ARM: vGICv3: handle virtual LPI pending and property tables
On Thu, 6 Apr 2017, Andre Przywara wrote:
> Allow a guest to provide the address and size for the memory regions
> it has reserved for the GICv3 pending and property tables.
> We sanitise the various fields of the respective redistributor
> registers and map those pages into Xen's address space to have easy
> access.
> This introduces a function to read and write from and to guest memory,
> to be later able to access the tables located there.
> This vgic_access_guest_memory() function has been written by Vijaya Kumar
> as part of an earlier series.
>
> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
> ---
>  xen/arch/arm/vgic-v3.c       | 152 ++++++++++++++++++++++++++++++++++++++-----
>  xen/arch/arm/vgic.c          |  39 +++++++++++
>  xen/include/asm-arm/domain.h |   6 +-
>  xen/include/asm-arm/vgic.h   |   3 +
>  4 files changed, 182 insertions(+), 18 deletions(-)
>
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index 2a14305..0623803 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -19,12 +19,14 @@
>   */
>
>  #include <xen/bitops.h>
> +#include <xen/domain_page.h>
>  #include <xen/lib.h>
>  #include <xen/init.h>
>  #include <xen/softirq.h>
>  #include <xen/irq.h>
>  #include <xen/sched.h>
>  #include <xen/sizes.h>
> +#include <xen/vmap.h>
>  #include <asm/current.h>
>  #include <asm/mmio.h>
>  #include <asm/gic_v3_defs.h>
> @@ -228,12 +230,21 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
>          goto read_reserved;
>
>      case VREG64(GICR_PROPBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +
> +        spin_lock(&v->arch.vgic.lock);
> +        *r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
> +        spin_unlock(&v->arch.vgic.lock);
> +        return 1;
>
>      case VREG64(GICR_PENDBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +
> +        spin_lock(&v->arch.vgic.lock);
> +        *r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);
> +        *r &= ~GICR_PENDBASER_PTZ;       /* WO, reads as 0 */
> +        spin_unlock(&v->arch.vgic.lock);
> +        return 1;
>
>      case 0x0080:
>          goto read_reserved;
> @@ -301,11 +312,6 @@ bad_width:
>      domain_crash_synchronous();
>      return 0;
>
> -read_as_zero_64:
> -    if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> -    *r = 0;
> -    return 1;
> -
>  read_as_zero_32:
>      if ( dabt.size != DABT_WORD ) goto bad_width;
>      *r = 0;
> @@ -330,11 +336,95 @@ read_unknown:
>      return 1;
>  }
>
> +static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
> +                                    int field_shift,
> +                                    uint64_t (*sanitise_fn)(uint64_t))
> +{
> +    uint64_t field = (reg & field_mask) >> field_shift;
> +
> +    field = sanitise_fn(field) << field_shift;
> +
> +    return (reg & ~field_mask) | field;
> +}
> +
> +/* We want to avoid outer shareable. */
> +static uint64_t vgic_sanitise_shareability(uint64_t field)
> +{
> +    switch ( field )
> +    {
> +    case GIC_BASER_OuterShareable:
> +        return GIC_BASER_InnerShareable;
> +    default:
> +        return field;
> +    }
> +}
> +
> +/* Avoid any inner non-cacheable mapping. */
> +static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
> +{
> +    switch ( field )
> +    {
> +    case GIC_BASER_CACHE_nCnB:
> +    case GIC_BASER_CACHE_nC:
> +        return GIC_BASER_CACHE_RaWb;
> +    default:
> +        return field;
> +    }
> +}
> +
> +/* Non-cacheable or same-as-inner are OK. */
> +static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
> +{
> +    switch ( field )
> +    {
> +    case GIC_BASER_CACHE_SameAsInner:
> +    case GIC_BASER_CACHE_nC:
> +        return field;
> +    default:
> +        return GIC_BASER_CACHE_nC;
> +    }
> +}
> +
> +static uint64_t sanitize_propbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
> +                              GICR_PROPBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~GICR_PROPBASER_RES0_MASK;
> +
> +    return reg;
> +}
> +
> +static uint64_t sanitize_pendbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
> +                              GICR_PENDBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~GICR_PENDBASER_RES0_MASK;
> +
> +    return reg;
> +}
> +
>  static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>                                            uint32_t gicr_reg,
>                                            register_t r)
>  {
>      struct hsr_dabt dabt = info->dabt;
> +    uint64_t reg;
>
>      switch ( gicr_reg )
>      {
> @@ -365,36 +455,64 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>          goto write_impl_defined;
>
>      case VREG64(GICR_SETLPIR):
> -        /* LPI is not implemented */
> +        /* LPIs without an ITS are not implemented */
>          goto write_ignore_64;
>
>      case VREG64(GICR_CLRLPIR):
> -        /* LPI is not implemented */
> +        /* LPIs without an ITS are not implemented */
>          goto write_ignore_64;
>
>      case 0x0050:
>          goto write_reserved;
>
>      case VREG64(GICR_PROPBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +
> +        spin_lock(&v->arch.vgic.lock);
> +
> +        /* Writing PROPBASER with LPIs enabled is UNPREDICTABLE. */
> +        if ( !(v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED) )
> +        {
> +            reg = v->domain->arch.vgic.rdist_propbase;
> +            vgic_reg64_update(&reg, r, info);
> +            reg = sanitize_propbaser(reg);
> +            v->domain->arch.vgic.rdist_propbase = reg;
> +        }
> +
> +        spin_unlock(&v->arch.vgic.lock);
> +
> +        return 1;
>
>      case VREG64(GICR_PENDBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +
> +        spin_lock(&v->arch.vgic.lock);
> +
> +        /* Writing PENDBASER with LPIs enabled is UNPREDICTABLE. */
> +        if ( !(v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED) )
> +        {
> +            reg = v->arch.vgic.rdist_pendbase;
> +            vgic_reg64_update(&reg, r, info);
> +            reg = sanitize_pendbaser(reg);
> +            v->arch.vgic.rdist_pendbase = reg;
> +        }
> +
> +        spin_unlock(&v->arch.vgic.lock);
> +
> +        return 1;
>
>      case 0x0080:
>          goto write_reserved;
>
>      case VREG64(GICR_INVLPIR):
> -        /* LPI is not implemented */
> +        /* LPIs without an ITS are not implemented */
>          goto write_ignore_64;
>
>      case 0x00A8:
>          goto write_reserved;
>
>      case VREG64(GICR_INVALLR):
> -        /* LPI is not implemented */
> +        /* LPIs without an ITS are not implemented */
>          goto write_ignore_64;
>
>      case 0x00B8:
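Just to illustrate what the sanitising above does (a standalone sketch, not part of the patch: the mask, shift and field values below are assumptions, the real ones live in gic_v3_defs.h): vgic_sanitise_field() pulls one field out of the register, runs it through the filter and splices it back without touching the rest, so e.g. an OuterShareable setting written by the guest comes back as InnerShareable.

    /* Standalone sketch -- hypothetical constants, only the helper logic matches. */
    #include <stdint.h>
    #include <stdio.h>

    #define SHAREABILITY_SHIFT      10
    #define SHAREABILITY_MASK       (0x3ULL << SHAREABILITY_SHIFT)
    #define GIC_OuterShareable      0x2ULL
    #define GIC_InnerShareable      0x1ULL

    static uint64_t sanitise_shareability(uint64_t field)
    {
        /* Same policy as vgic_sanitise_shareability(): no outer shareable. */
        return (field == GIC_OuterShareable) ? GIC_InnerShareable : field;
    }

    static uint64_t sanitise_field(uint64_t reg, uint64_t mask, int shift,
                                   uint64_t (*fn)(uint64_t))
    {
        /* Extract the field, filter it, put it back; leave other bits alone. */
        uint64_t field = fn((reg & mask) >> shift) << shift;

        return (reg & ~mask) | field;
    }

    int main(void)
    {
        uint64_t reg = 0x80000000ULL | (GIC_OuterShareable << SHAREABILITY_SHIFT);

        /* Prints 0x80000800 -> 0x80000400: only the shareability bits changed. */
        printf("0x%llx -> 0x%llx\n", (unsigned long long)reg,
               (unsigned long long)sanitise_field(reg, SHAREABILITY_MASK,
                                                  SHAREABILITY_SHIFT,
                                                  sanitise_shareability));
        return 0;
    }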
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index cd9a2a5..9b0dc3d 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -20,6 +20,7 @@
>  #include <xen/bitops.h>
>  #include <xen/lib.h>
>  #include <xen/init.h>
> +#include <xen/domain_page.h>
>  #include <xen/softirq.h>
>  #include <xen/irq.h>
>  #include <xen/sched.h>
> @@ -589,6 +590,44 @@ void vgic_free_virq(struct domain *d, unsigned int virq)
>      clear_bit(virq, d->arch.vgic.allocated_irqs);
>  }
>
> +int vgic_access_guest_memory(struct domain *d, paddr_t gpa, void *addr,
> +                             uint32_t size, bool_t is_write)

Because there are no callers of this function, I think it breaks the build.

> +{
> +    struct page_info *page;
> +    uint64_t offset;
> +    p2m_type_t p2mt;
> +    void *p;
> +
> +    page = get_page_from_gfn(d, paddr_to_pfn(gpa), &p2mt, P2M_ALLOC);
> +    if ( !page )
> +    {
> +        printk(XENLOG_G_ERR "d%d: vITS: Failed to get table entry\n",
> +               d->domain_id);
> +        return -EINVAL;
> +    }
> +
> +    if ( !p2m_is_ram(p2mt) )
> +    {
> +        put_page(page);
> +        printk(XENLOG_G_ERR "d%d: vITS: with wrong attributes\n", d->domain_id);
> +        return -EINVAL;
> +    }
> +
> +    p = __map_domain_page(page);
> +    /* Offset within the mapped page */
> +    offset = gpa & ~PAGE_MASK;
> +
> +    if ( is_write )
> +        memcpy(p + offset, addr, size);
> +    else
> +        memcpy(addr, p + offset, size);
> +
> +    unmap_domain_page(p);
> +    put_page(page);
> +
> +    return 0;
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
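For what it's worth, the commit message says the function is there so that later patches can access the tables, so presumably the callers arrive later in the series. Just to illustrate how it is meant to be used, a purely hypothetical caller (my names and constants, not taken from the series) fetching one LPI's configuration byte from the guest's property table could look roughly like this:

    /*
     * Hypothetical user, for illustration only: read the configuration
     * (priority/enable) byte of one LPI from the guest's property table.
     * GENMASK() availability and LPI_OFFSET are assumptions here.
     */
    #define LPI_OFFSET      8192    /* LPI IDs start at 8192 */

    static int vgic_get_lpi_config(struct domain *d, uint32_t lpi, uint8_t *cfg)
    {
        /* Bits [51:12] of the sanitised PROPBASER hold the table's address. */
        paddr_t table = d->arch.vgic.rdist_propbase & GENMASK(51, 12);

        /* One byte per LPI, indexed from the first LPI ID. */
        return vgic_access_guest_memory(d, table + lpi - LPI_OFFSET,
                                        cfg, sizeof(*cfg), false);
    }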
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 6ee7538..f993292 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -109,6 +109,8 @@ struct arch_domain
>      } *rdist_regions;
>      int nr_regions;                     /* Number of rdist regions */
>      uint32_t rdist_stride;              /* Re-Distributor stride */
> +    unsigned long long int nr_lpis;
> +    uint64_t rdist_propbase;
>      struct rb_root its_devices;         /* Devices mapped to an ITS */
>      spinlock_t its_devices_lock;        /* Protects the its_devices tree */
>      struct radix_tree_root pend_lpi_tree; /* Stores struct pending_irq's */
> @@ -256,7 +258,9 @@ struct arch_vcpu
>
>          /* GICv3: redistributor base and flags for this vCPU */
>          paddr_t rdist_base;
> -#define VGIC_V3_RDIST_LAST  (1 << 0)        /* last vCPU of the rdist */
> +        uint64_t rdist_pendbase;
> +#define VGIC_V3_RDIST_LAST      (1 << 0)        /* last vCPU of the rdist */
> +#define VGIC_V3_LPIS_ENABLED    (1 << 1)
>          uint8_t flags;
>      } vgic;
>
> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
> index 08d6294..2371960 100644
> --- a/xen/include/asm-arm/vgic.h
> +++ b/xen/include/asm-arm/vgic.h
> @@ -314,6 +314,9 @@ extern void register_vgic_ops(struct domain *d, const struct vgic_ops *ops);
>  int vgic_v2_init(struct domain *d, int *mmio_count);
>  int vgic_v3_init(struct domain *d, int *mmio_count);
>
> +int vgic_access_guest_memory(struct domain *d, paddr_t gpa, void *addr,
> +                             uint32_t size, bool_t is_write);
> +
>  extern int domain_vgic_register(struct domain *d, int *mmio_count);
>  extern int vcpu_vgic_free(struct vcpu *v);
>  extern bool vgic_to_sgi(struct vcpu *v, register_t sgir,
> --
> 2.8.2

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel