Re: [Xen-devel] [RFC PATCH 11/24] ARM: vGICv3: handle virtual LPI pending and property tables
Hi Andre,

On 28/09/16 19:24, Andre Przywara wrote:
> Allow a guest to provide the address and size for the memory regions
> it has reserved for the GICv3 pending and property tables.
> We sanitise the various fields of the respective redistributor
> registers and map those pages into Xen's address space to have easy
> access.
>
> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
> ---
>  xen/arch/arm/vgic-v3.c        | 189 ++++++++++++++++++++++++++++++++++++++----
>  xen/arch/arm/vgic.c           |   4 +
>  xen/include/asm-arm/domain.h  |   7 +-
>  xen/include/asm-arm/gic-its.h |  10 ++-
>  xen/include/asm-arm/vgic.h    |   3 +
>  5 files changed, 197 insertions(+), 16 deletions(-)
>
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index e9b6490..8fe8386 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -20,12 +20,14 @@
>  #include <xen/bitops.h>
>  #include <xen/config.h>
> +#include <xen/domain_page.h>
>  #include <xen/lib.h>
>  #include <xen/init.h>
>  #include <xen/softirq.h>
>  #include <xen/irq.h>
>  #include <xen/sched.h>
>  #include <xen/sizes.h>
> +#include <xen/vmap.h>
>  #include <asm/current.h>
>  #include <asm/mmio.h>
>  #include <asm/gic_v3_defs.h>
> @@ -228,12 +230,14 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
>          goto read_reserved;
>
>      case VREG64(GICR_PROPBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +        *r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
> +        return 1;
>
>      case VREG64(GICR_PENDBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +        *r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);

The PTZ field should read as 0.

> +        return 1;
>
>      case 0x0080:
>          goto read_reserved;
> @@ -301,11 +305,6 @@ bad_width:
>      domain_crash_synchronous();
>      return 0;
>
> -read_as_zero_64:
> -    if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> -    *r = 0;
> -    return 1;
> -
>  read_as_zero_32:
>      if ( dabt.size != DABT_WORD ) goto bad_width;
>      *r = 0;
> @@ -330,11 +329,149 @@ read_unknown:
>      return 1;
>  }
>
> +static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
> +                                    int field_shift,
> +                                    uint64_t (*sanitise_fn)(uint64_t))
> +{
> +    uint64_t field = (reg & field_mask) >> field_shift;
> +
> +    field = sanitise_fn(field) << field_shift;

Newline here please.

> +    return (reg & ~field_mask) | field;
> +}
> +
> +/* We want to avoid outer shareable. */
> +static uint64_t vgic_sanitise_shareability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_OuterShareable:
> +        return GIC_BASER_InnerShareable;
> +    default:
> +        return field;
> +    }
> +}

I am not sure I understand why we need to sanitise the value here. From my
understanding of the spec (see 8.11.18 in IHI 0069C), we should support any
shareability/cacheability, correct?

> +
> +/* Avoid any inner non-cacheable mapping. */
> +static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_CACHE_nCnB:
> +    case GIC_BASER_CACHE_nC:
> +        return GIC_BASER_CACHE_RaWb;
> +    default:
> +        return field;
> +    }
> +}
> +
> +/* Non-cacheable or same-as-inner are OK. */
> +static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_CACHE_SameAsInner:
> +    case GIC_BASER_CACHE_nC:
> +        return field;
> +    default:
> +        return GIC_BASER_CACHE_nC;
> +    }
> +}
> +
> +static uint64_t sanitize_propbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
> +                              GICR_PROPBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~PROPBASER_RES0_MASK;
> +    reg &= ~GENMASK(51, 48);

Why do you mask bits 51:48? There is no restriction in Xen on the size of the
IPA (though 52-bit support is part of ARMv8.2), so we should avoid open-coding
this mask everywhere in the code. Otherwise it will be more painful to extend
the number of bits supported. FWIW, all the p2m code checks whether the IPA is
supported.

> +    return reg;
> +}
> +
> +static uint64_t sanitize_pendbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
> +                              GICR_PENDBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~PENDBASER_RES0_MASK;
> +    reg &= ~GENMASK(51, 48);

Ditto.

> +    return reg;
> +}
> +
> +/*
> + * Allow mapping some parts of guest memory into Xen's VA space to have easy
> + * access to it. This is to allow ITS configuration data to be held in
> + * guest memory and avoid using Xen memory for that.
> + */
> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages)

Please pass a gfn_t rather than a paddr_t.

> +{
> +    mfn_t onepage;
> +    mfn_t *pages;

s/pages/mfns/

> +    int i;
> +    void *ptr;
> +
> +    /* TODO: free previous mapping, change prototype? use get-put-put? */
> +
> +    guest_addr &= PAGE_MASK;
> +
> +    if ( nr_pages == 1 )
> +    {
> +        pages = &onepage;
> +    } else
> +    {
> +        pages = xmalloc_array(mfn_t, nr_pages);
> +        if ( !pages )
> +            return NULL;
> +    }
> +
> +    for (i = 0; i < nr_pages; i++)
> +    {
> +        get_page_from_gfn(d, (guest_addr >> PAGE_SHIFT) + i, NULL, P2M_ALLOC);

get_page_from_gfn can fail if you try to get a page of memory that is not
backed by a RAM region. Also, get_page_from_gfn will work on foreign mappings,
and we don't want the guest using foreign memory (e.g. memory belonging to
another domain) for the ITS internal memory.

Also, please try to pay attention to the error paths whilst you write code. It
is a pain to handle them after the code has been written. I will try to point
them out when I spot them.

> +        pages[i] = _mfn((guest_addr + i * PAGE_SIZE) >> PAGE_SHIFT);

You cannot assume a 1:1 mapping between the IPA and the PA. Please use the
struct page_info returned by get_page_from_gfn.

> +    }
> +
> +    ptr = vmap(pages, nr_pages);

I am not a big fan of the vmap solution, for various reasons:
  - the VMAP area is small (only 1GB), so it will not scale (you seem to use
    it to map pretty much all the memory provisioned for the ITS);
  - writing to a register cannot fail, so how do you cope with a vmap failure?

I think the best approach here is to use something similar to the copy_*_guest
helpers, but dealing with IPAs rather than guest VAs.

> +
> +    if ( nr_pages > 1 )
> +        xfree(pages);
> +
> +    return ptr;
> +}
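To make the points above a bit more concrete, this is roughly the shape I
would expect the pinning loop to take if you keep this approach. This is only
a rough sketch, not compile-tested: the function name and parameters are
placeholders, p2m_is_ram() stands for whichever p2m predicate we settle on,
and the mfn_t wrapping may need tweaking for the tree you are based on.

static void *its_map_guest_pages(struct domain *d, gfn_t gfn, mfn_t *mfns,
                                 unsigned int nr_pages)
{
    unsigned int i;
    void *ptr;

    for ( i = 0; i < nr_pages; i++ )
    {
        p2m_type_t t;
        struct page_info *page;

        page = get_page_from_gfn(d, gfn_x(gfn) + i, &t, P2M_ALLOC);

        /*
         * Only accept RAM owned by the domain; this rules out foreign
         * mappings as well.
         */
        if ( !page || !p2m_is_ram(t) )
        {
            if ( page )
                put_page(page);
            goto err;
        }

        /*
         * Use the MFN of the page we took a reference on; the IPA is not
         * necessarily equal to the PA.
         */
        mfns[i] = _mfn(page_to_mfn(page));
    }

    ptr = vmap(mfns, nr_pages);
    if ( ptr )
        return ptr;

err:
    /* Drop the references taken so far before reporting the failure. */
    while ( i-- )
        put_page(mfn_to_page(mfn_x(mfns[i])));

    return NULL;
}

The important parts are that only domain-owned RAM is accepted and that every
error path drops the references already taken.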
> +
> +void unmap_guest_pages(void *va, int nr_pages)
> +{
> +    paddr_t pa;
> +    unsigned long i;
> +
> +    if ( !va )
> +        return;
> +
> +    va = (void *)((uintptr_t)va & PAGE_MASK);
> +    pa = virt_to_maddr(va);
> +
> +    vunmap(va);
> +    for (i = 0; i < nr_pages; i++)
> +        put_page(mfn_to_page((pa >> PAGE_SHIFT) + i));
> +
> +    return;
> +}
> +
>  static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>                                            uint32_t gicr_reg,
>                                            register_t r)
>  {
>      struct hsr_dabt dabt = info->dabt;
> +    uint64_t reg;
>
>      switch ( gicr_reg )
>      {
> @@ -375,13 +512,37 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>      case 0x0050:
>          goto write_reserved;
>
> -    case VREG64(GICR_PROPBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +    case VREG64(GICR_PROPBASER): {

Coding style: the { should be on its own line.

> +        int nr_pages;

unsigned int

> +
> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;

Newline here for clarity. Also, please use vgic_reg64_check_access rather than
open-coding it.

> +        if ( v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED )

From my understanding, VGIC_V3_LPIS_ENABLED is set when the guest enables LPIs
on this re-distributor. However, this check is not safe, as
GICR_CTLR.Enable_LPIs may be set concurrently (the re-distributors are
accessible from any vCPU).

Also, when the ITS is not available we should avoid handling the register at
all (i.e. treat it as write-ignore). My rationale here is that we should limit
the amount of emulation exposed to the guest whenever possible.

> +            return 1;

I think we should at least print a warning, as writing to GICR_PROPBASER when
GICR_CTLR.Enable_LPIs is set is unpredictable. IMHO, I would even crash the
guest.

The code below likely needs locking, as the property table is common to all
re-distributors and hence could be modified concurrently. Also, I would like
to see a comment on top of the GICR_TYPER emulation to mention that all
re-distributors share the same common property table
(GICR_TYPER.CommonLPIAff = 0).

> +
> +        reg = v->domain->arch.vgic.rdist_propbase;
> +        vgic_reg64_update(&reg, r, info);
> +        reg = sanitize_propbaser(reg);
> +        v->domain->arch.vgic.rdist_propbase = reg;
> +        nr_pages = BIT((v->domain->arch.vgic.rdist_propbase & 0x1f) + 1) - 8192;

The spec (see 8.11.19) says: "If the value of this field is larger than the
value of GICD_TYPER.IDbits, the GICD_TYPER.IDbits value applies." We don't
want to map more than necessary (see the sketch after the GICR_PENDBASER hunk
below).

> +        nr_pages = DIV_ROUND_UP(nr_pages, PAGE_SIZE);
> +        unmap_guest_pages(v->domain->arch.vgic.proptable, nr_pages);

This looks wrong to me. A guest could specify a size different from the
previous write.

> +        v->domain->arch.vgic.proptable = map_guest_pages(v->domain,
> +                                                         reg & GENMASK(47, 12),
> +                                                         nr_pages);
> +        return 1;
> +    }
>
>      case VREG64(GICR_PENDBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;

Newline + vgic_reg64_check_access. Also, you don't check whether the LPIs have
been enabled here. All my comments above stand. Furthermore, the code is not
correctly indented (you are using hard tabs).

> +        reg = v->arch.vgic.rdist_pendbase;
> +        vgic_reg64_update(&reg, r, info);
> +        reg = sanitize_pendbaser(reg);
> +        v->arch.vgic.rdist_pendbase = reg;
> +
> +        unmap_guest_pages(v->arch.vgic.pendtable, 16);
> +        v->arch.vgic.pendtable = map_guest_pages(v->domain,
> +                                                 reg & GENMASK(47, 12), 16);

The pending table is never touched by Xen, so I would avoid mapping it at all.
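Coming back to the IDbits point above, the sizing logic is roughly what I
would expect to look like the sketch below. This is only an illustration:
vgic_nr_id_bits() is a made-up placeholder for wherever your series stores the
value exposed in GICD_TYPER.IDbits.

/*
 * How many pages the virtual property table needs. The guest-provided
 * IDbits value is clamped against what we expose in GICD_TYPER.IDbits.
 */
static unsigned int vgic_propbase_nr_pages(struct domain *d, uint64_t propbase)
{
    unsigned int idbits = (propbase & 0x1f) + 1;

    if ( idbits > vgic_nr_id_bits(d) )
        idbits = vgic_nr_id_bits(d);

    /* LPI INTIDs start at 8192, so fewer than 14 bits means no LPIs. */
    if ( idbits < 14 )
        return 0;

    return DIV_ROUND_UP(BIT(idbits) - 8192, PAGE_SIZE);
}

Storing the value used for the mapping (rather than recomputing it from the
latest write) would also address the unmap size issue I mentioned.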
> +        return 1;
>
>      case 0x0080:
>          goto write_reserved;
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index b961551..4d9304f 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -488,6 +488,10 @@ struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int lpi,
>          empty->pirq.irq = lpi;
>      }
>
> +    /* Update the enabled status */
> +    if ( gicv3_lpi_is_enabled(v->domain, lpi) )
> +        set_bit(GIC_IRQ_GUEST_ENABLED, &empty->pirq.status);
> +
>      return &empty->pirq;
>  }
>
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index ae8a9de..0cd3500 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -109,6 +109,8 @@ struct arch_domain
>          } *rdist_regions;
>          int nr_regions;                     /* Number of rdist regions */
>          uint32_t rdist_stride;              /* Re-Distributor stride */
> +        uint64_t rdist_propbase;
> +        uint8_t *proptable;
>  #endif
>      } vgic;
>
> @@ -247,7 +249,10 @@ struct arch_vcpu
>
>          /* GICv3: redistributor base and flags for this vCPU */
>          paddr_t rdist_base;
> -#define VGIC_V3_RDIST_LAST (1 << 0)        /* last vCPU of the rdist */
> +#define VGIC_V3_RDIST_LAST      (1 << 0)   /* last vCPU of the rdist */

Please avoid spurious changes. We don't require all the constants to be
aligned in Xen, and this makes it harder to go through the changes.

> +#define VGIC_V3_LPIS_ENABLED    (1 << 1)

Please document the purpose of this bit.

> +        uint64_t rdist_pendbase;
> +        unsigned long *pendtable;
>          uint8_t flags;
>          struct list_head pending_lpi_list;
>      } vgic;
> diff --git a/xen/include/asm-arm/gic-its.h b/xen/include/asm-arm/gic-its.h
> index 1f881c0..3b2e5c0 100644
> --- a/xen/include/asm-arm/gic-its.h
> +++ b/xen/include/asm-arm/gic-its.h
> @@ -139,7 +139,11 @@ int gicv3_lpi_drop_host_lpi(struct host_its *its,
>
>  static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)

s/lpi/vlpi/ to make it clear this function deals with virtual LPIs.

>  {
> -    return GIC_PRI_IRQ;
> +    return d->arch.vgic.proptable[lpi - 8192] & 0xfc;

I think this is the best place to ask this question: I don't see any code
within this series checking that the guest has effectively initialized
proptable and that the size is correct (you don't check that the guest
provided enough memory for the vLPIs suggested); see the sketch at the end of
this mail. FWIW, I already made those comments when Vijay sent his patch
series. It might be worth looking at what he did regarding all the sanity
checks.

> +}

Newline here for clarity.

> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)

Ditto for the naming.

> +{
> +    return d->arch.vgic.proptable[lpi - 8192] & LPI_PROP_ENABLED;
>  }
>
>  #else
>
> @@ -185,6 +189,10 @@ static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
>  {
>      return GIC_PRI_IRQ;
>  }

Newline here for clarity.

> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
> +{
> +    return false;
> +}
>  #endif /* CONFIG_HAS_ITS */
>
> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
> index 4e29ba6..2b216cc 100644
> --- a/xen/include/asm-arm/vgic.h
> +++ b/xen/include/asm-arm/vgic.h
> @@ -285,6 +285,9 @@ VGIC_REG_HELPERS(32, 0x3);
>
>  #undef VGIC_REG_HELPERS
>
> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages);
> +void unmap_guest_pages(void *va, int nr_pages);
> +
>  enum gic_sgi_mode;
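To illustrate the kind of sanity check I am after on the proptable accessors,
something along these lines would do. Again, only a sketch: proptable_size is
a made-up field that would need to be set when GICR_PROPBASER is written, and
the helper name is just for illustration.

static inline uint8_t vgic_vlpi_prop(struct domain *d, uint32_t vlpi)
{
    /*
     * Treat an unset property table, a non-LPI INTID or an out-of-range
     * vLPI as "priority 0, disabled" instead of reading past the end of
     * the mapped table. 8192 is the first LPI INTID.
     */
    if ( !d->arch.vgic.proptable || vlpi < 8192 ||
         vlpi - 8192 >= d->arch.vgic.proptable_size )
        return 0;

    return d->arch.vgic.proptable[vlpi - 8192];
}

static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t vlpi)
{
    return vgic_vlpi_prop(d, vlpi) & 0xfc;
}

static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t vlpi)
{
    return vgic_vlpi_prop(d, vlpi) & LPI_PROP_ENABLED;
}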
Regards,

--
Julien Grall