Re: [Xen-devel] [PATCH v6 24/36] ARM: vITS: add command handling stub and MMIO emulation
Hi Andre,

On 04/07/2017 06:32 PM, Andre Przywara wrote:
> Emulate the memory mapped ITS registers and provide a stub to introduce
> the ITS command handling framework (but without actually emulating any
> commands at this time).
>
> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
> ---
>  xen/arch/arm/vgic-v3-its.c       | 512 +++++++++++++++++++++++++++++++++++++++
>  xen/include/asm-arm/gic_v3_its.h |   3 +
>  2 files changed, 515 insertions(+)
>
> diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
> index 065ffe2..a171a3b 100644
> --- a/xen/arch/arm/vgic-v3-its.c
> +++ b/xen/arch/arm/vgic-v3-its.c
> @@ -67,6 +67,9 @@ struct vits_itte
>      uint16_t pad;
>  };
>
> +#define GITS_BASER_RO_MASK   (GITS_BASER_TYPE_MASK | \
> +                              (31UL << GITS_BASER_ENTRY_SIZE_SHIFT))
> +
>  int vgic_v3_its_init_domain(struct domain *d)
>  {
>      spin_lock_init(&d->arch.vgic.its_devices_lock);
> @@ -80,6 +83,515 @@ void vgic_v3_its_free_domain(struct domain *d)
>      ASSERT(RB_EMPTY_ROOT(&d->arch.vgic.its_devices));
>  }
>
> +/**************************************
> + * Functions that handle ITS commands *
> + **************************************/
> +
> +static uint64_t its_cmd_mask_field(uint64_t *its_cmd, unsigned int word,
> +                                   unsigned int shift, unsigned int size)
> +{
> +    return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT(size) - 1);
> +}
> +
> +#define its_cmd_get_command(cmd)        its_cmd_mask_field(cmd, 0,  0,  8)
> +#define its_cmd_get_deviceid(cmd)       its_cmd_mask_field(cmd, 0, 32, 32)
> +#define its_cmd_get_size(cmd)           its_cmd_mask_field(cmd, 1,  0,  5)
> +#define its_cmd_get_id(cmd)             its_cmd_mask_field(cmd, 1,  0, 32)
> +#define its_cmd_get_physical_id(cmd)    its_cmd_mask_field(cmd, 1, 32, 32)
> +#define its_cmd_get_collection(cmd)     its_cmd_mask_field(cmd, 2,  0, 16)
> +#define its_cmd_get_target_addr(cmd)    its_cmd_mask_field(cmd, 2, 16, 32)
> +#define its_cmd_get_validbit(cmd)       its_cmd_mask_field(cmd, 2, 63,  1)
> +#define its_cmd_get_ittaddr(cmd)        (its_cmd_mask_field(cmd, 2, 8, 44) << 8)
> +
> +#define ITS_CMD_BUFFER_SIZE(baser)      ((((baser) & 0xff) + 1) << 12)
> +
> +/*
> + * Requires the vcmd_lock to be held.
> + * TODO: Investigate whether we can be smarter here and don't need to hold
> + * the lock all of the time.
> + */
> +static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its)
> +{
> +    paddr_t addr = its->cbaser & GENMASK(51, 12);
> +    uint64_t command[4];
> +    uint64_t creadr = its->creadr;
> +
> +    ASSERT(spin_is_locked(&its->vcmd_lock));
> +
> +    if ( its->cwriter >= ITS_CMD_BUFFER_SIZE(its->cbaser) )
> +        return -1;
> +
> +    while ( creadr != its->cwriter )
> +    {
> +        int ret;
> +
> +        ret = vgic_access_guest_memory(d, addr + creadr,
> +                                       command, sizeof(command), false);
> +        if ( ret )
> +            return ret;
> +
> +        switch ( its_cmd_get_command(command) )
> +        {
> +        case GITS_CMD_SYNC:
> +            /* We handle ITS commands synchronously, so we ignore SYNC. */
> +            break;
> +        default:
> +            gdprintk(XENLOG_WARNING, "ITS: unhandled ITS command %lu\n",
> +                     its_cmd_get_command(command));
> +            break;
> +        }
> +
> +        creadr += ITS_CMD_SIZE;
> +        if ( creadr == ITS_CMD_BUFFER_SIZE(its->cbaser) )
> +            creadr = 0;
> +        its->creadr = creadr; /* allow the guest to see the progress */

I hope you know that the compiler can decide to drop the temporary
variable as an optimization? ;) So it may decide to write back to
its->creadr every time anyway.

> +
> +        if ( ret )
> +            gdprintk(XENLOG_WARNING,
> +                     "ITS: ITS command error %d while handling command %lu\n",
> +                     ret, its_cmd_get_command(command));
> +    }
> +
> +    return 0;
> +}
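Related to the write-back remark above: if intermediate write-backs to
its->creadr are acceptable anyway — and the comment suggests they are even
wanted — then one option is simply to drop the temporary and update the
guest-visible field directly. Rough, untested sketch (the switch body is
elided, everything else is the quoted code):

    while ( its->creadr != its->cwriter )
    {
        int ret = vgic_access_guest_memory(d, addr + its->creadr,
                                           command, sizeof(command), false);
        if ( ret )
            return ret;

        /* ... handle the command as before ... */

        its->creadr += ITS_CMD_SIZE;
        if ( its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser) )
            its->creadr = 0;    /* wrap; the guest sees progress per command */
    }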
> +
> +/*****************************
> + * ITS registers read access *
> + *****************************/
> +
> +/* Identifying as an ARM IP, using "X" as the product ID. */
> +#define GITS_IIDR_VALUE         0x5800034c

Do we need to ask ARM to register this value, to prevent someone from
re-using it for another purpose in the future?

> +
> +static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info,
> +                                 register_t *r, void *priv)
> +{
> +    struct virt_its *its = priv;
> +    uint64_t reg;
> +
> +    switch ( info->gpa & 0xffff )
> +    {
> +    case VREG32(GITS_CTLR):
> +    {
> +        /*
> +         * We try to avoid waiting for the command queue lock and report
> +         * non-quiescent if that lock is already taken.
> +         */
> +        bool have_cmd_lock;
> +
> +        if ( info->dabt.size != DABT_WORD ) goto bad_width;
> +
> +        have_cmd_lock = spin_trylock(&its->vcmd_lock);
> +        spin_lock(&its->its_lock);
> +        if ( its->enabled )
> +            reg = GITS_CTLR_ENABLE;
> +        else
> +            reg = 0;
> +
> +        if ( have_cmd_lock && its->cwriter == its->creadr )
> +            reg |= GITS_CTLR_QUIESCENT;
> +
> +        spin_unlock(&its->its_lock);
> +        if ( have_cmd_lock )
> +            spin_unlock(&its->vcmd_lock);
> +
> +        *r = vgic_reg32_extract(reg, info);
> +        break;
> +    }
> +    case VREG32(GITS_IIDR):
> +        if ( info->dabt.size != DABT_WORD ) goto bad_width;
> +        *r = vgic_reg32_extract(GITS_IIDR_VALUE, info);
> +        break;
> +    case VREG64(GITS_TYPER):
> +        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
> +
> +        reg = GITS_TYPER_PHYSICAL;
> +        reg |= (sizeof(struct vits_itte) - 1) << GITS_TYPER_ITT_SIZE_SHIFT;
> +        reg |= (its->intid_bits - 1) << GITS_TYPER_IDBITS_SHIFT;
> +        reg |= (its->devid_bits - 1) << GITS_TYPER_DEVIDS_SHIFT;
> +        *r = vgic_reg64_extract(reg, info);
> +        break;
> +    case 0x0018 ... 0x001c:
> +        goto read_reserved;
> +    case 0x0020 ... 0x003c:
> +        goto read_impl_defined;
> +    case 0x0040 ... 0x007c:
> +        goto read_reserved;
> +    case VREG64(GITS_CBASER):
> +        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
> +        spin_lock(&its->its_lock);
> +        *r = vgic_reg64_extract(its->cbaser, info);
> +        spin_unlock(&its->its_lock);
> +        break;
> +    case VREG64(GITS_CWRITER):
> +        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
> +
> +        reg = its->cwriter;
> +        *r = vgic_reg64_extract(reg, info);
> +        break;
> +    case VREG64(GITS_CREADR):
> +        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
> +
> +        reg = its->creadr;
> +        *r = vgic_reg64_extract(reg, info);
> +        break;
> +    case 0x0098 ... 0x00fc:
> +        goto read_reserved;
> +    case VREG64(GITS_BASER0):           /* device table */
> +        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
> +        spin_lock(&its->its_lock);
> +        *r = vgic_reg64_extract(its->baser_dev, info);
> +        spin_unlock(&its->its_lock);
> +        break;
> +    case VREG64(GITS_BASER1):           /* collection table */
> +        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
> +        spin_lock(&its->its_lock);
> +        *r = vgic_reg64_extract(its->baser_coll, info);
> +        spin_unlock(&its->its_lock);
> +        break;
> +    case VRANGE64(GITS_BASER2, GITS_BASER7):
> +        goto read_as_zero_64;
> +    case 0x0140 ... 0xbffc:
> +        goto read_reserved;
> +    case 0xc000 ... 0xffcc:
> +        goto read_impl_defined;
> +    case 0xffd0 ... 0xffe4:
> +        goto read_as_zero_64;
> +    case VREG32(GITS_PIDR2):
> +        if ( info->dabt.size != DABT_WORD ) goto bad_width;
> +        *r = vgic_reg32_extract(GIC_PIDR2_ARCH_GICv3, info);
> +        break;
> +    case 0xffec ... 0xfffc:
> +        goto read_as_zero_64;

Why don't you have a default case here? You don't cover all the ranges
(basically the tail of the reserved regions, such as 0xfffc - 0xfffe ...).
For those accesses you will return 1 as if they had been handled. So
please add a default case and switch all the "s ... e" cases to VRANGE*.

> +    }
> +
> +    return 1;
> +
> +read_as_zero_64:
> +    if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
> +    *r = 0;
> +
> +    return 1;
> +
> +read_impl_defined:
> +    printk(XENLOG_G_DEBUG
> +           "%pv: vGITS: RAZ on implementation defined register offset %#04lx\n",
> +           v, info->gpa & 0xffff);
> +    *r = 0;
> +    return 1;
> +
> +read_reserved:
> +    printk(XENLOG_G_DEBUG
> +           "%pv: vGITS: RAZ on reserved register offset %#04lx\n",
> +           v, info->gpa & 0xffff);
> +    *r = 0;
> +    return 1;
> +
> +bad_width:
> +    printk(XENLOG_G_ERR "vGIIS: bad read width %d r%d offset %#04lx\n",
> +           info->dabt.size, info->dabt.reg, (unsigned long)info->gpa & 0xffff);
> +    domain_crash_synchronous();
> +
> +    return 0;
> +}
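Just to illustrate the default case and the VRANGE* conversion I am
asking for, the tail of the switch could look something like the below.
Untested; it keeps the existing labels and reuses the VRANGE* helpers
already used above, and the printk mirrors the style of this file:

    case VRANGE32(0xffec, 0xfffc):      /* now covers up to 0xffff */
        goto read_as_zero_64;
    default:
        printk(XENLOG_G_ERR "%pv: vGITS: unhandled read r%d offset %#04lx\n",
               v, info->dabt.reg, (unsigned long)(info->gpa & 0xffff));
        return 0;
    }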
> +
> +/******************************
> + * ITS registers write access *
> + ******************************/
> +
> +static unsigned int its_baser_table_size(uint64_t baser)
> +{
> +    unsigned int ret, page_size[4] = {SZ_4K, SZ_16K, SZ_64K, SZ_64K};
> +
> +    ret = page_size[(baser >> GITS_BASER_PAGE_SIZE_SHIFT) & 3];
> +
> +    return ret * ((baser & GITS_BASER_SIZE_MASK) + 1);
> +}
> +
> +static unsigned int its_baser_nr_entries(uint64_t baser)
> +{
> +    int entry_size = GITS_BASER_ENTRY_SIZE(baser);

You said you fixed this... but it looks like you did not. So please
s/int/unsigned int/.

> +
> +    return its_baser_table_size(baser) / entry_size;
> +}
> +
> +/* Must be called with the ITS lock held. */
> +static bool vgic_v3_verify_its_status(struct virt_its *its, bool status)
> +{
> +    ASSERT(spin_is_locked(&its->its_lock));
> +
> +    if ( !status )
> +        return false;
> +
> +    if ( !(its->cbaser & GITS_VALID_BIT) ||
> +         !(its->baser_dev & GITS_VALID_BIT) ||
> +         !(its->baser_coll & GITS_VALID_BIT) )
> +    {
> +        printk(XENLOG_G_WARNING "d%d tried to enable ITS without having the tables configured.\n",
> +               its->d->domain_id);
> +        return false;
> +    }
> +
> +    return true;
> +}
> +
> +static void sanitize_its_base_reg(uint64_t *reg)
> +{
> +    uint64_t r = *reg;
> +
> +    /* Avoid outer shareable. */
> +    switch ( (r >> GITS_BASER_SHAREABILITY_SHIFT) & 0x03 )
> +    {
> +    case GIC_BASER_OuterShareable:
> +        r = r & ~GITS_BASER_SHAREABILITY_MASK;

NIT: r &= ~GITS...;

> +        r |= GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT;
> +        break;
> +    default:
> +        break;
> +    }
> +
> +    /* Avoid any inner non-cacheable mapping. */
> +    switch ( (r >> GITS_BASER_INNER_CACHEABILITY_SHIFT) & 0x07 )
> +    {
> +    case GIC_BASER_CACHE_nCnB:
> +    case GIC_BASER_CACHE_nC:
> +        r = r & ~GITS_BASER_INNER_CACHEABILITY_MASK;

Ditto.

> +        r |= GIC_BASER_CACHE_RaWb << GITS_BASER_INNER_CACHEABILITY_SHIFT;
> +        break;
> +    default:
> +        break;
> +    }
> +
> +    /* Only allow non-cacheable or same-as-inner. */
> +    switch ( (r >> GITS_BASER_OUTER_CACHEABILITY_SHIFT) & 0x07 )
> +    {
> +    case GIC_BASER_CACHE_SameAsInner:
> +    case GIC_BASER_CACHE_nC:
> +        break;
> +    default:
> +        r = r & ~GITS_BASER_OUTER_CACHEABILITY_MASK;

Ditto.

> +        r |= GIC_BASER_CACHE_nC << GITS_BASER_OUTER_CACHEABILITY_SHIFT;
> +        break;
> +    }
> +
> +    *reg = r;
> +}
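To be explicit, for the two remarks above I mean something like this
(untested, just the quoted code with the change applied):

static unsigned int its_baser_nr_entries(uint64_t baser)
{
    unsigned int entry_size = GITS_BASER_ENTRY_SIZE(baser);

    return its_baser_table_size(baser) / entry_size;
}

and, for the NITs in sanitize_its_base_reg(), e.g. the shareability arm:

    case GIC_BASER_OuterShareable:
        r &= ~GITS_BASER_SHAREABILITY_MASK;
        r |= GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT;
        break;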
> +
> +static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info,
> +                                  register_t r, void *priv)
> +{
> +    struct domain *d = v->domain;
> +    struct virt_its *its = priv;
> +    uint64_t reg;
> +    uint32_t reg32;
> +
> +    switch ( info->gpa & 0xffff )
> +    {
> +    case VREG32(GITS_CTLR):
> +    {
> +        uint32_t ctlr;
> +
> +        if ( info->dabt.size != DABT_WORD ) goto bad_width;
> +
> +        /*
> +         * We need to take the vcmd_lock to prevent a guest from disabling
> +         * the ITS while commands are still processed.
> +         */
> +        spin_lock(&its->vcmd_lock);
> +        spin_lock(&its->its_lock);
> +        ctlr = its->enabled ? GITS_CTLR_ENABLE : 0;
> +        reg32 = ctlr;
> +        vgic_reg32_update(&reg32, r, info);
> +
> +        if ( ctlr ^ reg32 )
> +            its->enabled = vgic_v3_verify_its_status(its,
> +                                                     reg32 & GITS_CTLR_ENABLE);
> +        spin_unlock(&its->its_lock);
> +        spin_unlock(&its->vcmd_lock);
> +        return 1;
> +    }
> +
> +    case VREG32(GITS_IIDR):
> +        goto write_ignore_32;
> +    case VREG32(GITS_TYPER):
> +        goto write_ignore_32;
> +    case 0x0018 ... 0x001c:

Please implement the range correctly using VRANGE*.

> +        goto write_reserved;
> +    case 0x0020 ... 0x003c:
> +        goto write_impl_defined;
> +    case 0x0040 ... 0x007c:
> +        goto write_reserved;
> +    case VREG64(GITS_CBASER):
> +        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
> +
> +        spin_lock(&its->vcmd_lock);

Why do you need to take the command lock here? The its->enabled check
below already prevents CBASER from being overwritten.

> +        spin_lock(&its->its_lock);
> +        /* Changing base registers with the ITS enabled is UNPREDICTABLE. */
> +        if ( its->enabled )
> +        {
> +            spin_unlock(&its->its_lock);
> +            spin_unlock(&its->vcmd_lock);
> +            gdprintk(XENLOG_WARNING,
> +                     "ITS: tried to change CBASER with the ITS enabled.\n");
> +            return 1;
> +        }
> +
> +        reg = its->cbaser;
> +        vgic_reg64_update(&reg, r, info);
> +        sanitize_its_base_reg(&reg);
> +
> +        its->cbaser = reg;
> +        its->creadr = 0;
> +        spin_unlock(&its->its_lock);
> +        spin_unlock(&its->vcmd_lock);
> +
> +        return 1;
> +
> +    case VREG64(GITS_CWRITER):
> +        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
> +
> +        spin_lock(&its->vcmd_lock);
> +        reg = its->cwriter & 0xfffe0;

Please explain this mask.

> +        vgic_reg64_update(&reg, r, info);
> +        its->cwriter = reg & 0xfffe0;

Ditto. You likely need a define for that.

> +
> +        if ( its->enabled )

So its->enabled is in this case protected by vcmd_lock and not by
its->its_lock as in other places, correct? If so, please document it.

> +        {
> +            int ret = vgic_its_handle_cmds(d, its);

I am not convinced of the usefulness of the temporary variable ret. You
could directly do:

    if ( vgic_its_handle_cmds(...) )
        printk(...);

> +
> +            if ( ret )
> +                printk(XENLOG_G_WARNING "error handling ITS commands\n");

Again, you likely want to print the domain id here. So I would switch it
to gdprintk.

> +        }
> +        spin_unlock(&its->vcmd_lock);
> +
> +        return 1;
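FWIW, pulling the remarks on this case together: GITS_CWRITER only carries
the Offset field in bits [19:5] (bits [4:0] are RES0), which is what the
0xfffe0 above selects, so I would expect a named constant and the whole
case to end up looking roughly like the below. Completely untested sketch;
ITS_CMD_OFFSET_MASK is just a name I made up:

#define ITS_CMD_OFFSET_MASK     GENMASK(19, 5)

    case VREG64(GITS_CWRITER):
        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;

        spin_lock(&its->vcmd_lock);
        reg = its->cwriter & ITS_CMD_OFFSET_MASK;
        vgic_reg64_update(&reg, r, info);
        its->cwriter = reg & ITS_CMD_OFFSET_MASK;

        /* its->enabled is protected by vcmd_lock here, see above. */
        if ( its->enabled )
            if ( vgic_its_handle_cmds(d, its) )
                gdprintk(XENLOG_WARNING, "error handling ITS commands\n");

        spin_unlock(&its->vcmd_lock);

        return 1;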
> +
> +    case VREG64(GITS_CREADR):
> +        goto write_ignore_64;
> +
> +    case 0x0098 ... 0x00fc:
> +        goto write_reserved;
> +    case VREG64(GITS_BASER0):           /* device table */
> +        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
> +
> +        spin_lock(&its->its_lock);
> +
> +        /*
> +         * Changing base registers with the ITS enabled is UNPREDICTABLE,
> +         * we choose to ignore it, but warn.
> +         */
> +        if ( its->enabled )
> +        {
> +            spin_unlock(&its->its_lock);
> +            gdprintk(XENLOG_WARNING, "ITS: tried to change BASER with the ITS enabled.\n");
> +
> +            return 1;
> +        }
> +
> +        reg = its->baser_dev;
> +        vgic_reg64_update(&reg, r, info);
> +
> +        /* We don't support indirect tables for now. */
> +        reg &= ~(GITS_BASER_RO_MASK | GITS_BASER_INDIRECT);
> +        reg |= (sizeof(uint64_t) - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
> +        reg |= GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT;
> +        sanitize_its_base_reg(&reg);
> +
> +        if ( reg & GITS_VALID_BIT )
> +        {
> +            its->max_devices = its_baser_nr_entries(reg);
> +            if ( its->max_devices > BIT(its->devid_bits) )
> +                its->max_devices = BIT(its->devid_bits);
> +        }
> +        else
> +            its->max_devices = 0;
> +
> +        its->baser_dev = reg;
> +        spin_unlock(&its->its_lock);
> +        return 1;
> +    case VREG64(GITS_BASER1):           /* collection table */
> +        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
> +
> +        spin_lock(&its->its_lock);
> +        /*
> +         * Changing base registers with the ITS enabled is UNPREDICTABLE,
> +         * we choose to ignore it, but warn.
> +         */
> +        if ( its->enabled )
> +        {
> +            spin_unlock(&its->its_lock);
> +            gdprintk(XENLOG_INFO, "ITS: tried to change BASER with the ITS enabled.\n");
> +            return 1;
> +        }
> +
> +        reg = its->baser_coll;
> +        vgic_reg64_update(&reg, r, info);
> +        /* No indirect tables for the collection table. */
> +        reg &= ~(GITS_BASER_RO_MASK | GITS_BASER_INDIRECT);
> +        reg |= (sizeof(uint16_t) - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
> +        reg |= GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT;
> +        sanitize_its_base_reg(&reg);
> +
> +        if ( reg & GITS_VALID_BIT )
> +            its->max_collections = its_baser_nr_entries(reg);
> +        else
> +            its->max_collections = 0;
> +        its->baser_coll = reg;
> +        spin_unlock(&its->its_lock);
> +        return 1;
> +    case VRANGE64(GITS_BASER2, GITS_BASER7):
> +        goto write_ignore_64;
> +    case 0x0140 ... 0xbffc:
> +        goto write_reserved;
> +    case 0xc000 ... 0xffcc:
> +        goto write_impl_defined;
> +    case 0xffd0 ... 0xffe4:             /* IMPDEF identification registers */
> +        goto write_impl_defined;
> +    case VREG32(GITS_PIDR2):
> +        goto write_ignore_32;
> +    case 0xffec ... 0xfffc:             /* IMPDEF identification registers */
> +        goto write_impl_defined;
> +    default:
> +        gdprintk(XENLOG_G_WARNING, "ITS: unhandled ITS register 0x%lx\n",
> +                 info->gpa & 0xffff);
> +        return 0;
> +    }
> +
> +    return 1;
> +
> +write_ignore_64:
> +    if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
> +    return 1;
> +
> +write_ignore_32:
> +    if ( info->dabt.size != DABT_WORD ) goto bad_width;
> +    return 1;
> +
> +write_impl_defined:
> +    printk(XENLOG_G_DEBUG
> +           "%pv: vGITS: WI on implementation defined register offset %#04lx\n",
> +           v, info->gpa & 0xffff);
> +    return 1;
> +
> +write_reserved:
> +    printk(XENLOG_G_DEBUG
> +           "%pv: vGITS: WI on implementation defined register offset %#04lx\n",
> +           v, info->gpa & 0xffff);
> +    return 1;
> +
> +bad_width:
> +    printk(XENLOG_G_ERR "vGITS: bad write width %d r%d offset %#08lx\n",
> +           info->dabt.size, info->dabt.reg, (unsigned long)info->gpa & 0xffff);
> +
> +    domain_crash_synchronous();
> +
> +    return 0;
> +}
> +
> +static const struct mmio_handler_ops vgic_its_mmio_handler = {
> +    .read  = vgic_v3_its_mmio_read,
> +    .write = vgic_v3_its_mmio_write,
> +};
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/include/asm-arm/gic_v3_its.h b/xen/include/asm-arm/gic_v3_its.h
> index 09c7117..ea574c4 100644
> --- a/xen/include/asm-arm/gic_v3_its.h
> +++ b/xen/include/asm-arm/gic_v3_its.h
> @@ -35,6 +35,7 @@
>  #define GITS_BASER5                     0x128
>  #define GITS_BASER6                     0x130
>  #define GITS_BASER7                     0x138
> +#define GITS_PIDR2                      GICR_PIDR2
>
>  /* Register bits */
>  #define GITS_VALID_BIT                  BIT(63)
> @@ -57,6 +58,7 @@
>  #define GITS_TYPER_ITT_SIZE_MASK        (0xfUL << GITS_TYPER_ITT_SIZE_SHIFT)
>  #define GITS_TYPER_ITT_SIZE(r)          ((((r) & GITS_TYPER_ITT_SIZE_MASK) >> \
>                                                  GITS_TYPER_ITT_SIZE_SHIFT) + 1)
> +#define GITS_TYPER_PHYSICAL             (1U << 0)
>
>  #define GITS_BASER_INDIRECT             BIT(62)
>  #define GITS_BASER_INNER_CACHEABILITY_SHIFT        59
> @@ -76,6 +78,7 @@
>                          (((reg >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
>  #define GITS_BASER_SHAREABILITY_SHIFT   10
>  #define GITS_BASER_PAGE_SIZE_SHIFT      8
> +#define GITS_BASER_SIZE_MASK            0xff
>  #define GITS_BASER_SHAREABILITY_MASK   (0x3ULL << GITS_BASER_SHAREABILITY_SHIFT)
>  #define GITS_BASER_OUTER_CACHEABILITY_MASK   (0x7ULL << GITS_BASER_OUTER_CACHEABILITY_SHIFT)
>  #define GITS_BASER_INNER_CACHEABILITY_MASK   (0x7ULL << GITS_BASER_INNER_CACHEABILITY_SHIFT)
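Lastly, not a request for any change — just double-checking the sizing
math that max_devices/max_collections rely on. A tiny standalone C sketch
of the same decode (the constants mirror GITS_BASER_PAGE_SIZE_SHIFT and
GITS_BASER_SIZE_MASK above; nothing Xen-specific, illustration only):

#include <stdint.h>
#include <stdio.h>

/* Mirrors its_baser_table_size()/its_baser_nr_entries() from the patch. */
static unsigned int table_size(uint64_t baser)
{
    unsigned int page_size[4] = { 0x1000, 0x4000, 0x10000, 0x10000 };

    return page_size[(baser >> 8) & 3] * ((baser & 0xff) + 1);
}

int main(void)
{
    /* Collection table: Size = 0 (one page), Page_Size = 0b00 (4K), 2-byte entries. */
    uint64_t baser_coll = 0;
    /* Device table: Size = 1 (two pages), Page_Size = 0b10 (64K), 8-byte entries. */
    uint64_t baser_dev = (2ULL << 8) | 1;

    printf("collections: %u\n", table_size(baser_coll) / 2);   /* 2048 */
    printf("devices:     %u\n", table_size(baser_dev) / 8);    /* 16384 */

    return 0;
}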
--
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel