[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v5 08/22] xen/arm: ITS: Add virtual ITS commands support
From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx> Add Virtual ITS command processing support to Virtual ITS driver Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx> --- v5: - Rename vgic_its_*() to vits_*() v4: - Use helper function to read from command queue - Add MOVALL - Removed check for entry in device in domain RB-tree --- xen/arch/arm/vgic-v3-its.c | 392 +++++++++++++++++++++++++++++++++++++++++ xen/include/asm-arm/gic-its.h | 13 ++ 2 files changed, 405 insertions(+) diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c index 60f8332..dfa3435 100644 --- a/xen/arch/arm/vgic-v3-its.c +++ b/xen/arch/arm/vgic-v3-its.c @@ -30,8 +30,27 @@ #include <asm/gic.h> #include <asm/vgic.h> #include <asm/gic-its.h> +#include <asm/atomic.h> #include <xen/log2.h> +#define DEBUG_ITS + +#ifdef DEBUG_ITS +# define DPRINTK(fmt, args...) dprintk(XENLOG_DEBUG, fmt, ##args) +#else +# define DPRINTK(fmt, args...) do {} while ( 0 ) +#endif + +#ifdef DEBUG_ITS +static void dump_cmd(its_cmd_block *cmd) +{ + printk("VITS:CMD[0] = 0x%lx CMD[1] = 0x%lx CMD[2] = 0x%lx CMD[3] = 0x%lx\n", + cmd->bits[0], cmd->bits[1], cmd->bits[2], cmd->bits[3]); +} +#else +static void dump_cmd(its_cmd_block *cmd) { do {} while ( 0 ); } +#endif + static int vits_access_guest_table(struct domain *d, paddr_t entry, void *addr, uint32_t size, bool_t set) { @@ -152,6 +171,379 @@ int vits_get_vitt_entry(struct domain *d, uint32_t devid, return vits_vitt_entry(d, devid, event, entry, 0); } +static int vits_process_sync(struct vcpu *v, struct vgic_its *vits, + its_cmd_block *virt_cmd) +{ + /* Ignored */ + DPRINTK("%pv: vITS: SYNC: ta 0x%"PRIx32" \n", v, virt_cmd->sync.ta); + + return 0; +} + +static int vits_process_mapvi(struct vcpu *v, struct vgic_its *vits, + its_cmd_block *virt_cmd) +{ + struct vitt entry; + struct domain *d = v->domain; + uint8_t vcol_id, cmd; + uint32_t vid, dev_id, event; + + vcol_id = virt_cmd->mapvi.col; + vid = virt_cmd->mapvi.phy_id; + cmd = 
virt_cmd->mapvi.cmd; + dev_id = virt_cmd->mapvi.devid; + + DPRINTK("%pv: vITS: MAPVI: dev 0x%"PRIx32" vcol %"PRId32" vid %"PRId32"\n", + v, dev_id, vcol_id, vid); + + entry.valid = true; + entry.vcollection = vcol_id; + entry.vlpi = vid; + + if ( cmd == GITS_CMD_MAPI ) + vits_set_vitt_entry(d, dev_id, vid, &entry); + else + { + event = virt_cmd->mapvi.event; + vits_set_vitt_entry(d, dev_id, event, &entry); + } + + return 0; +} + +static int vits_process_movi(struct vcpu *v, struct vgic_its *vits, + its_cmd_block *virt_cmd) +{ + struct vitt entry; + struct domain *d = v->domain; + uint32_t dev_id, event; + uint8_t vcol_id; + + vcol_id = virt_cmd->movi.col; + event = virt_cmd->movi.event; + dev_id = virt_cmd->movi.devid; + + DPRINTK("%pv vITS: MOVI: dev_id 0x%"PRIx32" vcol %"PRId32" event %"PRId32"\n", + v, dev_id, vcol_id, event); + + if ( vits_get_vitt_entry(d, dev_id, event, &entry) ) + return -EINVAL; + + entry.vcollection = vcol_id; + + if ( vits_set_vitt_entry(d, dev_id, event, &entry) ) + return -EINVAL; + + return 0; +} + +static int vits_process_movall(struct vcpu *v, struct vgic_its *vits, + its_cmd_block *virt_cmd) +{ + /* Ignored */ + DPRINTK("%pv: vITS: MOVALL: ta1 0x%"PRIx32" ta2 0x%"PRIx32" \n", + v, virt_cmd->movall.ta1, virt_cmd->movall.ta2); + + return 0; +} + +static int vits_process_discard(struct vcpu *v, struct vgic_its *vits, + its_cmd_block *virt_cmd) +{ + struct vitt entry; + struct domain *d = v->domain; + uint32_t event, dev_id; + + event = virt_cmd->discard.event; + dev_id = virt_cmd->discard.devid; + + DPRINTK("%pv vITS: DISCARD: dev_id 0x%"PRIx32" id %"PRId32"\n", + v, virt_cmd->discard.devid, event); + + if ( vits_get_vitt_entry(d, dev_id, event, &entry) ) + return -EINVAL; + + entry.valid = false; + + if ( vits_set_vitt_entry(d, dev_id, event, &entry) ) + return -EINVAL; + + return 0; +} + +static int vits_process_inv(struct vcpu *v, struct vgic_its *vits, + its_cmd_block *virt_cmd) +{ + /* Ignored */ + DPRINTK("%pv vITS: INV: dev_id 
0x%"PRIx32" id %"PRId32"\n", + v, virt_cmd->inv.devid, virt_cmd->inv.event); + + return 0; +} + +static int vits_process_clear(struct vcpu *v, struct vgic_its *vits, + its_cmd_block *virt_cmd) +{ + /* Ignored */ + DPRINTK("%pv: vITS: CLEAR: dev_id 0x%"PRIx32" id %"PRId32"\n", + v, virt_cmd->clear.devid, virt_cmd->clear.event); + + return 0; +} + +static int vits_process_invall(struct vcpu *v, struct vgic_its *vits, + its_cmd_block *virt_cmd) +{ + /* Ignored */ + DPRINTK("%pv: vITS: INVALL: vCID %"PRId32"\n", v, virt_cmd->invall.col); + + return 0; +} + +static int vits_process_int(struct vcpu *v, struct vgic_its *vits, + its_cmd_block *virt_cmd) +{ + uint32_t event, dev_id ; + + event = virt_cmd->int_cmd.cmd; + dev_id = virt_cmd->int_cmd.devid; + + DPRINTK("%pv: vITS: INT: Device 0x%"PRIx32" id %"PRId32"\n", + v, dev_id, event); + + /* TODO: Inject LPI */ + + return 0; +} + +static int vits_add_device(struct vcpu *v, struct vgic_its *vits, + its_cmd_block *virt_cmd) +{ + struct domain *d = v->domain; + struct vdevice_table dt_entry; + uint32_t dev_id = virt_cmd->mapd.devid; + + DPRINTK("%pv: vITS:Add dev 0x%"PRIx32" ipa = 0x%"PRIx64" size %"PRId32"\n", + v, dev_id, (u64)virt_cmd->mapd.itt << MAPC_ITT_IPA_SHIFT, + virt_cmd->mapd.size); + + if ( virt_cmd->mapd.valid ) + { + /* itt field is 40 bit. 
extract 48 bit address by shifting */ + dt_entry.vitt_ipa = virt_cmd->mapd.itt << MAPC_ITT_IPA_SHIFT; + dt_entry.vitt_size = (1 << (virt_cmd->mapd.size + 1)) * + sizeof(struct vitt); + } + else + { + dt_entry.vitt_ipa = INVALID_PADDR; + dt_entry.vitt_size = 0; + } + + if ( vits_set_vdevice_entry(d, dev_id, &dt_entry) ) + return -EINVAL; + + return 0; +} + +static int vits_process_mapc(struct vcpu *v, struct vgic_its *vits, + its_cmd_block *virt_cmd) +{ + uint8_t vcol_id; + uint64_t vta = 0; + + vcol_id = virt_cmd->mapc.col; + vta = virt_cmd->mapc.ta; + + DPRINTK("%pv: vITS: MAPC: vCID %"PRId32" vTA 0x%"PRIx64" valid %"PRId32"\n", + v, vcol_id, vta, virt_cmd->mapc.valid); + + if ( virt_cmd->mapc.valid ) + { + if ( vta > v->domain->max_vcpus ) + return -EINVAL; + vits->collections[vcol_id].target_address = vta; + } + else + vits->collections[vcol_id].target_address = INVALID_PADDR; + + return 0; +} + +#ifdef DEBUG_ITS +char *cmd_str[] = { + [GITS_CMD_MOVI] = "MOVI", + [GITS_CMD_INT] = "INT", + [GITS_CMD_CLEAR] = "CLEAR", + [GITS_CMD_SYNC] = "SYNC", + [GITS_CMD_MAPD] = "MAPD", + [GITS_CMD_MAPC] = "MAPC", + [GITS_CMD_MAPVI] = "MAPVI", + [GITS_CMD_MAPI] = "MAPI", + [GITS_CMD_INV] = "INV", + [GITS_CMD_INVALL] = "INVALL", + [GITS_CMD_MOVALL] = "MOVALL", + [GITS_CMD_DISCARD] = "DISCARD", + }; +#endif + +static int vits_parse_its_command(struct vcpu *v, struct vgic_its *vits, + its_cmd_block *virt_cmd) +{ + uint8_t cmd = virt_cmd->hdr.cmd; + int ret; + + DPRINTK("%pv: vITS: Received cmd %s (0x%"PRIx32")\n", v, cmd_str[cmd], cmd); + dump_cmd(virt_cmd); + + switch ( cmd ) + { + case GITS_CMD_MAPD: + ret = vits_add_device(v, vits, virt_cmd); + break; + case GITS_CMD_MAPC: + ret = vits_process_mapc(v, vits, virt_cmd); + break; + case GITS_CMD_MAPI: + /* MAPI is same as MAPVI */ + case GITS_CMD_MAPVI: + ret = vits_process_mapvi(v, vits, virt_cmd); + break; + case GITS_CMD_MOVI: + ret = vits_process_movi(v, vits, virt_cmd); + break; + case GITS_CMD_MOVALL: + ret = 
vits_process_movall(v, vits, virt_cmd); + break; + case GITS_CMD_DISCARD: + ret = vits_process_discard(v, vits, virt_cmd); + break; + case GITS_CMD_INV: + ret = vits_process_inv(v, vits, virt_cmd); + break; + case GITS_CMD_INVALL: + ret = vits_process_invall(v, vits, virt_cmd); + break; + case GITS_CMD_INT: + ret = vits_process_int(v, vits, virt_cmd); + break; + case GITS_CMD_CLEAR: + ret = vits_process_clear(v, vits, virt_cmd); + break; + case GITS_CMD_SYNC: + ret = vits_process_sync(v, vits, virt_cmd); + break; + default: + dprintk(XENLOG_G_ERR, "%pv: vITS: Unhandled command cmd %"PRIx32"\n", + v, cmd); + return 1; + } + + if ( ret ) + { + dprintk(XENLOG_G_ERR, "%pv: vITS: Failed to handle cmd %"PRIx32"\n", + v, cmd); + return 1; + } + + return 0; +} + +static int vits_read_virt_cmd(struct vcpu *v, struct vgic_its *vits, + its_cmd_block *virt_cmd) +{ + paddr_t maddr; + struct domain *d = v->domain; + int ret; + + ASSERT(spin_is_locked(&vits->lock)); + + if ( !(vits->cmd_base & GITS_CBASER_VALID) ) + { + dprintk(XENLOG_G_ERR, "%pv: vITS: Invalid CBASER\n", v); + return 0; + } + + /* CMD Q can be more than 1 page. 
Map only page that is required */ + maddr = (vits->cmd_base & MASK_4K) + atomic_read(&vits->cmd_read); + + DPRINTK("%pv: vITS: Mapping CMD Q maddr 0x%"PRIx64" read 0x%"PRIx32"\n", + v, maddr, atomic_read(&vits->cmd_read)); + + ret = vits_access_guest_table(d, maddr, (void *)virt_cmd, + sizeof(its_cmd_block), 0); + if ( ret ) + { + dprintk(XENLOG_G_ERR, + "%pv: vITS: Failed to get command page @page 0x%"PRIx32"\n", + v, atomic_read(&vits->cmd_read)); + return -EINVAL; + } + + /* No command queue is created by vits to check on Q full */ + atomic_add(sizeof(its_cmd_block), &vits->cmd_read); + if ( atomic_read(&vits->cmd_read) == vits->cmd_qsize ) + { + DPRINTK("%pv: vITS: Reset read @ 0x%"PRIx32" qsize 0x%"PRIx64"\n", + v, atomic_read(&vits->cmd_read), vits->cmd_qsize); + + atomic_set(&vits->cmd_read, 0); + } + + return 0; +} + +int vits_process_cmd(struct vcpu *v, struct vgic_its *vits) +{ + its_cmd_block virt_cmd; + + ASSERT(spin_is_locked(&vits->lock)); + + do { + if ( vits_read_virt_cmd(v, vits, &virt_cmd) ) + goto err; + if ( vits_parse_its_command(v, vits, &virt_cmd) ) + goto err; + } while ( vits->cmd_write != atomic_read(&vits->cmd_read) ); + + DPRINTK("%pv: vITS: read @ 0x%"PRIx32" write @ 0x%"PRIx64"\n", + v, atomic_read(&vits->cmd_read), + vits->cmd_write); + + return 1; +err: + dprintk(XENLOG_G_ERR, "%pv: vITS: Failed to process guest cmd\n", v); + domain_crash_synchronous(); + + return 0; +} + +int vits_domain_init(struct domain *d) +{ + struct vgic_its *vits; + int i; + + d->arch.vgic.vits = xzalloc(struct vgic_its); + if ( !d->arch.vgic.vits ) + return -ENOMEM; + + vits = d->arch.vgic.vits; + + spin_lock_init(&vits->lock); + + vits->collections = xzalloc_array(struct its_collection, nr_cpu_ids); + if ( !vits->collections ) + { + xfree(d->arch.vgic.vits); + return -ENOMEM; + } + + for ( i = 0; i < nr_cpu_ids; i++ ) + vits->collections[i].target_address = ~0UL; + + return 0; +} + /* * Local variables: * mode: C diff --git a/xen/include/asm-arm/gic-its.h 
b/xen/include/asm-arm/gic-its.h index 66be53a..cdb786c 100644 --- a/xen/include/asm-arm/gic-its.h +++ b/xen/include/asm-arm/gic-its.h @@ -21,6 +21,8 @@ #include <asm/gic_v3_defs.h> #include <xen/rbtree.h> +#define MASK_4K 0xfffffffff000UL +#define MAPC_ITT_IPA_SHIFT 8 /* * ITS registers, offsets from ITS_base */ @@ -121,10 +123,21 @@ struct its_collection { */ struct vgic_its { + spinlock_t lock; + /* Command queue base */ + paddr_t cmd_base; + /* Command queue write pointer */ + paddr_t cmd_write; + /* Command queue read pointer */ + atomic_t cmd_read; + /* Command queue size */ + unsigned long cmd_qsize; /* vITT device table ipa */ paddr_t dt_ipa; /* vITT device table size */ uint64_t dt_size; + /* collections mapped */ + struct its_collection *collections; }; /* ITS command structure */ -- 1.7.9.5 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support. |