[Xen-devel] [PATCH for-4.9 v4 1/2] x86/vioapic: introduce support for multiple vIO APICS
Add support for multiple vIO APICs on the same domain, thus turning
d->arch.hvm_domain.vioapic into an array of vIO APIC control structures.

Note that this functionality is not exposed to unprivileged guests, and
will only be used by PVHv2 Dom0.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
Changes since v3:
 - Fix for loop in base_gsi to properly use domain_vioapic and switch to
   a while loop instead.
 - Change ASSERT in vioapic_irq_positive_edge to assert vioapic itself
   (instead of has_vioapic).
 - s/j/pin/ (two instances).
 - Change gdprintks in vpt to dprintk and print the domain id.
 - In ioapic_{save/load} check if domain has a vioapic before calling
   domain_vioapic.
 - Add an empty line between vioapic_domain define and gsi_vioapic
   prototype.

Changes since v2:
 - More constify.
 - gsi_vioapic should return the pin and not the base GSI.
 - Change pin_gsi to base_gsi, and make it return the base vIO APIC GSI.
 - Make sure base_gsi doesn't overrun the array.
 - Add ASSERTs to make sure DomU don't use more than one vIO APIC.

Changes since v1:
 - Constify some parameters.
 - Make gsi_vioapic return the base GSI of the IO APIC.
 - Add checks to pt_irq_vector in order to prevent dereferencing NULL.
---
 xen/arch/x86/hvm/dom0_build.c     |   2 +-
 xen/arch/x86/hvm/vioapic.c        | 225 ++++++++++++++++++++++++++++----------
 xen/arch/x86/hvm/vlapic.c         |   2 +-
 xen/arch/x86/hvm/vpt.c            |  27 ++++-
 xen/include/asm-x86/hvm/domain.h  |   3 +-
 xen/include/asm-x86/hvm/vioapic.h |   5 +-
 6 files changed, 197 insertions(+), 67 deletions(-)

diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 5576db4ee8..daa791d3f4 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -729,7 +729,7 @@ static int __init pvh_setup_acpi_madt(struct domain *d, paddr_t *addr)
     io_apic = (void *)(madt + 1);
     io_apic->header.type = ACPI_MADT_TYPE_IO_APIC;
     io_apic->header.length = sizeof(*io_apic);
-    io_apic->id = domain_vioapic(d)->id;
+    io_apic->id = domain_vioapic(d, 0)->id;
     io_apic->address = VIOAPIC_DEFAULT_BASE_ADDRESS;

     x2apic = (void *)(io_apic + 1);
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 6bc8dbdd42..0badb055d8 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -42,7 +42,58 @@
 /* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
 #define IRQ0_SPECIAL_ROUTING 1

-static void vioapic_deliver(struct hvm_vioapic *vioapic, int irq);
+static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int irq);
+
+static struct hvm_vioapic *addr_vioapic(const struct domain *d,
+                                        unsigned long addr)
+{
+    unsigned int i;
+
+    for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
+    {
+        struct hvm_vioapic *vioapic = domain_vioapic(d, i);
+
+        if ( addr >= vioapic->base_address &&
+             addr < vioapic->base_address + VIOAPIC_MEM_LENGTH )
+            return vioapic;
+    }
+
+    return NULL;
+}
+
+struct hvm_vioapic *gsi_vioapic(const struct domain *d, unsigned int gsi,
+                                unsigned int *pin)
+{
+    unsigned int i, base_gsi = 0;
+
+    for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
+    {
+        struct hvm_vioapic *vioapic = domain_vioapic(d, i);
+
+        if ( gsi >= base_gsi && gsi < base_gsi + vioapic->nr_pins )
+        {
+            *pin = gsi - base_gsi;
+            return vioapic;
+        }
+
+        base_gsi += vioapic->nr_pins;
+    }
+
+    return NULL;
+}
+
+static unsigned int base_gsi(const struct domain *d,
+                             const struct hvm_vioapic *vioapic)
+{
+    unsigned int nr_vioapics = d->arch.hvm_domain.nr_vioapics;
+    unsigned int base_gsi = 0, i = 0;
+    const struct hvm_vioapic *tmp;
+
+    while ( --nr_vioapics && (tmp = domain_vioapic(d, i++)) != vioapic )
+        base_gsi += tmp->nr_pins;
+
+    return base_gsi;
+}

 static uint32_t vioapic_read_indirect(const struct hvm_vioapic *vioapic)
 {
@@ -94,11 +145,14 @@ static int vioapic_read(
     struct vcpu *v, unsigned long addr,
     unsigned int length, unsigned long *pval)
 {
-    const struct hvm_vioapic *vioapic = domain_vioapic(v->domain);
+    const struct hvm_vioapic *vioapic;
     uint32_t result;

     HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "addr %lx", addr);

+    vioapic = addr_vioapic(v->domain, addr);
+    ASSERT(vioapic);
+
     switch ( addr & 0xff )
     {
     case VIOAPIC_REG_SELECT:
@@ -126,6 +180,7 @@ static void vioapic_write_redirent(
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     union vioapic_redir_entry *pent, ent;
     int unmasked = 0;
+    unsigned int gsi = base_gsi(d, vioapic) + idx;

     spin_lock(&d->arch.hvm_domain.irq_lock);

@@ -149,7 +204,7 @@ static void vioapic_write_redirent(

     *pent = ent;

-    if ( idx == 0 )
+    if ( gsi == 0 )
     {
         vlapic_adjust_i8259_target(d);
     }
@@ -165,7 +220,7 @@ static void vioapic_write_redirent(

     spin_unlock(&d->arch.hvm_domain.irq_lock);

-    if ( idx == 0 || unmasked )
+    if ( gsi == 0 || unmasked )
         pt_may_unmask_irq(d, NULL);
 }

@@ -215,7 +270,10 @@ static int vioapic_write(
     struct vcpu *v, unsigned long addr,
     unsigned int length, unsigned long val)
 {
-    struct hvm_vioapic *vioapic = domain_vioapic(v->domain);
+    struct hvm_vioapic *vioapic;
+
+    vioapic = addr_vioapic(v->domain, addr);
+    ASSERT(vioapic);

     switch ( addr & 0xff )
     {
@@ -242,10 +300,7 @@ static int vioapic_write(

 static int vioapic_range(struct vcpu *v, unsigned long addr)
 {
-    struct hvm_vioapic *vioapic = domain_vioapic(v->domain);
-
-    return ((addr >= vioapic->base_address &&
-             (addr < vioapic->base_address + VIOAPIC_MEM_LENGTH)));
+    return !!addr_vioapic(v->domain, addr);
 }

 static const struct hvm_mmio_ops vioapic_mmio_ops = {
@@ -275,16 +330,17 @@ static inline int pit_channel0_enabled(void)
     return pt_active(&current->domain->arch.vpit.pt0);
 }

-static void vioapic_deliver(struct hvm_vioapic *vioapic, int irq)
+static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
 {
-    uint16_t dest = vioapic->redirtbl[irq].fields.dest_id;
-    uint8_t dest_mode = vioapic->redirtbl[irq].fields.dest_mode;
-    uint8_t delivery_mode = vioapic->redirtbl[irq].fields.delivery_mode;
-    uint8_t vector = vioapic->redirtbl[irq].fields.vector;
-    uint8_t trig_mode = vioapic->redirtbl[irq].fields.trig_mode;
+    uint16_t dest = vioapic->redirtbl[pin].fields.dest_id;
+    uint8_t dest_mode = vioapic->redirtbl[pin].fields.dest_mode;
+    uint8_t delivery_mode = vioapic->redirtbl[pin].fields.delivery_mode;
+    uint8_t vector = vioapic->redirtbl[pin].fields.vector;
+    uint8_t trig_mode = vioapic->redirtbl[pin].fields.trig_mode;
     struct domain *d = vioapic_domain(vioapic);
     struct vlapic *target;
     struct vcpu *v;
+    unsigned int irq = base_gsi(d, vioapic) + pin;

     ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));

@@ -361,64 +417,71 @@ static void vioapic_deliver(struct hvm_vioapic *vioapic, int irq)

 void vioapic_irq_positive_edge(struct domain *d, unsigned int irq)
 {
-    struct hvm_vioapic *vioapic = domain_vioapic(d);
+    unsigned int pin;
+    struct hvm_vioapic *vioapic = gsi_vioapic(d, irq, &pin);
     union vioapic_redir_entry *ent;

-    ASSERT(has_vioapic(d));
+    ASSERT(vioapic);

     HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %x", irq);

-    ASSERT(irq < vioapic->nr_pins);
+    ASSERT(pin < vioapic->nr_pins);
     ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));

-    ent = &vioapic->redirtbl[irq];
+    ent = &vioapic->redirtbl[pin];
     if ( ent->fields.mask )
         return;

     if ( ent->fields.trig_mode == VIOAPIC_EDGE_TRIG )
     {
-        vioapic_deliver(vioapic, irq);
+        vioapic_deliver(vioapic, pin);
     }
     else if ( !ent->fields.remote_irr )
     {
         ent->fields.remote_irr = 1;
-        vioapic_deliver(vioapic, irq);
+        vioapic_deliver(vioapic, pin);
     }
 }

 void vioapic_update_EOI(struct domain *d, u8 vector)
 {
-    struct hvm_vioapic *vioapic = domain_vioapic(d);
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     union vioapic_redir_entry *ent;
-    int gsi;
+    unsigned int i, base_gsi = 0;

     ASSERT(has_vioapic(d));

     spin_lock(&d->arch.hvm_domain.irq_lock);

-    for ( gsi = 0; gsi < vioapic->nr_pins; gsi++ )
+    for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
     {
-        ent = &vioapic->redirtbl[gsi];
-        if ( ent->fields.vector != vector )
-            continue;
-
-        ent->fields.remote_irr = 0;
+        struct hvm_vioapic *vioapic = domain_vioapic(d, i);
+        unsigned int pin;

-        if ( iommu_enabled )
+        for ( pin = 0; pin < vioapic->nr_pins; pin++ )
         {
-            spin_unlock(&d->arch.hvm_domain.irq_lock);
-            hvm_dpci_eoi(d, gsi, ent);
-            spin_lock(&d->arch.hvm_domain.irq_lock);
-        }
-
-        if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
-             !ent->fields.mask &&
-             hvm_irq->gsi_assert_count[gsi] )
-        {
-            ent->fields.remote_irr = 1;
-            vioapic_deliver(vioapic, gsi);
+            ent = &vioapic->redirtbl[pin];
+            if ( ent->fields.vector != vector )
+                continue;
+
+            ent->fields.remote_irr = 0;
+
+            if ( iommu_enabled )
+            {
+                spin_unlock(&d->arch.hvm_domain.irq_lock);
+                hvm_dpci_eoi(d, base_gsi + pin, ent);
+                spin_lock(&d->arch.hvm_domain.irq_lock);
+            }
+
+            if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
+                 !ent->fields.mask &&
+                 hvm_irq->gsi_assert_count[base_gsi + pin] )
+            {
+                ent->fields.remote_irr = 1;
+                vioapic_deliver(vioapic, pin);
+            }
         }
+        base_gsi += vioapic->nr_pins;
     }

     spin_unlock(&d->arch.hvm_domain.irq_lock);
@@ -426,12 +489,15 @@ void vioapic_update_EOI(struct domain *d, u8 vector)

 static int ioapic_save(struct domain *d, hvm_domain_context_t *h)
 {
-    struct hvm_vioapic *s = domain_vioapic(d);
+    struct hvm_vioapic *s;

     if ( !has_vioapic(d) )
         return 0;

-    if ( s->nr_pins != ARRAY_SIZE(s->domU.redirtbl) )
+    s = domain_vioapic(d, 0);
+
+    if ( s->nr_pins != ARRAY_SIZE(s->domU.redirtbl) ||
+         d->arch.hvm_domain.nr_vioapics != 1 )
         return -EOPNOTSUPP;

     return hvm_save_entry(IOAPIC, 0, h, &s->domU);
@@ -439,12 +505,15 @@ static int ioapic_save(struct domain *d, hvm_domain_context_t *h)

 static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
 {
-    struct hvm_vioapic *s = domain_vioapic(d);
+    struct hvm_vioapic *s;

     if ( !has_vioapic(d) )
         return -ENODEV;

-    if ( s->nr_pins != ARRAY_SIZE(s->domU.redirtbl) )
+    s = domain_vioapic(d, 0);
+
+    if ( s->nr_pins != ARRAY_SIZE(s->domU.redirtbl) ||
+         d->arch.hvm_domain.nr_vioapics != 1 )
         return -EOPNOTSUPP;

     return hvm_load_entry(IOAPIC, h, &s->domU);
@@ -454,32 +523,68 @@ HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);

 void vioapic_reset(struct domain *d)
 {
-    struct hvm_vioapic *vioapic = domain_vioapic(d);
-    uint32_t nr_pins = vioapic->nr_pins;
-    int i;
+    unsigned int i;

     if ( !has_vioapic(d) )
+    {
+        ASSERT(!d->arch.hvm_domain.nr_vioapics);
         return;
+    }

-    memset(vioapic, 0, hvm_vioapic_size(nr_pins));
-    vioapic->domain = d;
-    vioapic->nr_pins = nr_pins;
-    for ( i = 0; i < nr_pins; i++ )
-        vioapic->redirtbl[i].fields.mask = 1;
-    vioapic->base_address = VIOAPIC_DEFAULT_BASE_ADDRESS;
+    for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
+    {
+        struct hvm_vioapic *vioapic = domain_vioapic(d, i);
+        unsigned int pin, nr_pins = vioapic->nr_pins;
+
+        memset(vioapic, 0, hvm_vioapic_size(nr_pins));
+        for ( pin = 0; pin < nr_pins; pin++ )
+            vioapic->redirtbl[pin].fields.mask = 1;
+        ASSERT(!i);
+        vioapic->base_address = VIOAPIC_DEFAULT_BASE_ADDRESS +
+                                VIOAPIC_MEM_LENGTH * 0;
+        vioapic->id = 0;
+        vioapic->nr_pins = nr_pins;
+        vioapic->domain = d;
+    }
+}
+
+static void vioapic_free(const struct domain *d, unsigned int nr_vioapics)
+{
+    unsigned int i;
+
+    for ( i = 0; i < nr_vioapics; i++)
+        xfree(domain_vioapic(d, i));
+    xfree(d->arch.hvm_domain.vioapic);
 }

 int vioapic_init(struct domain *d)
 {
+    unsigned int i, nr_vioapics = 1;
+    unsigned int nr_pins = ARRAY_SIZE(domain_vioapic(d, 0)->domU.redirtbl);
+
     if ( !has_vioapic(d) )
+    {
+        ASSERT(!d->arch.hvm_domain.nr_vioapics);
         return 0;
+    }

     if ( (d->arch.hvm_domain.vioapic == NULL) &&
-         ((d->arch.hvm_domain.vioapic = xmalloc(struct hvm_vioapic)) == NULL) )
+         ((d->arch.hvm_domain.vioapic =
+           xzalloc_array(struct hvm_vioapic *, nr_vioapics)) == NULL) )
         return -ENOMEM;

-    d->arch.hvm_domain.vioapic->domain = d;
-    domain_vioapic(d)->nr_pins = ARRAY_SIZE(domain_vioapic(d)->domU.redirtbl);
+    for ( i = 0; i < nr_vioapics; i++ )
+    {
+        if ( (domain_vioapic(d, i) =
+              xmalloc_bytes(hvm_vioapic_size(nr_pins))) == NULL )
+        {
+            vioapic_free(d, nr_vioapics);
+            return -ENOMEM;
+        }
+        domain_vioapic(d, i)->nr_pins = nr_pins;
+    }
+
+    d->arch.hvm_domain.nr_vioapics = nr_vioapics;

     vioapic_reset(d);
     register_mmio_handler(d, &vioapic_mmio_ops);
@@ -490,8 +595,10 @@ int vioapic_init(struct domain *d)
 void vioapic_deinit(struct domain *d)
 {
     if ( !has_vioapic(d) )
+    {
+        ASSERT(!d->arch.hvm_domain.nr_vioapics);
         return;
+    }

-    xfree(d->arch.hvm_domain.vioapic);
-    d->arch.hvm_domain.vioapic = NULL;
+    vioapic_free(d, d->arch.hvm_domain.nr_vioapics);
 }
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 0590d6c69d..2653ba80fd 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1107,7 +1107,7 @@ static int __vlapic_accept_pic_intr(struct vcpu *v)
     if ( !has_vioapic(d) )
         return 0;

-    redir0 = domain_vioapic(d)->redirtbl[0];
+    redir0 = domain_vioapic(d, 0)->redirtbl[0];

     /* We deliver 8259 interrupts to the appropriate CPU as follows. */
     return ((/* IOAPIC pin0 is unmasked and routing to this LAPIC? */
diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c
index 5c48fdb4b5..e3f203915b 100644
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -78,7 +78,8 @@ void hvm_set_guest_time(struct vcpu *v, u64 guest_time)
 static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
 {
     struct vcpu *v = pt->vcpu;
-    unsigned int gsi, isa_irq;
+    struct hvm_vioapic *vioapic;
+    unsigned int gsi, isa_irq, pin;

     if ( pt->source == PTSRC_lapic )
         return pt->irq;
@@ -91,13 +92,23 @@ static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
                 + (isa_irq & 7));

     ASSERT(src == hvm_intsrc_lapic);
-    return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector;
+    vioapic = gsi_vioapic(v->domain, gsi, &pin);
+    if ( !vioapic )
+    {
+        dprintk(XENLOG_WARNING, "d%u: invalid GSI (%u) for platform timer\n",
+                v->domain->domain_id, gsi);
+        domain_crash(v->domain);
+        return -1;
+    }
+
+    return vioapic->redirtbl[pin].fields.vector;
 }

 static int pt_irq_masked(struct periodic_time *pt)
 {
     struct vcpu *v = pt->vcpu;
-    unsigned int gsi, isa_irq;
+    unsigned int gsi, isa_irq, pin;
+    struct hvm_vioapic *vioapic;
     uint8_t pic_imr;

     if ( pt->source == PTSRC_lapic )
@@ -110,9 +121,17 @@ static int pt_irq_masked(struct periodic_time *pt)
     isa_irq = pt->irq;
     gsi = hvm_isa_irq_to_gsi(isa_irq);
     pic_imr = v->domain->arch.hvm_domain.vpic[isa_irq >> 3].imr;
+    vioapic = gsi_vioapic(v->domain, gsi, &pin);
+    if ( !vioapic )
+    {
+        dprintk(XENLOG_WARNING, "d%u: invalid GSI (%u) for platform timer\n",
+                v->domain->domain_id, gsi);
+        domain_crash(v->domain);
+        return -1;
+    }

     return (((pic_imr & (1 << (isa_irq & 7))) || !vlapic_accept_pic_intr(v)) &&
-            domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
+            vioapic->redirtbl[pin].fields.mask);
 }

 static void pt_lock(struct periodic_time *pt)
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 622e1ca12d..d2899c9bb2 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -127,7 +127,8 @@ struct hvm_domain {
     spinlock_t irq_lock;
     struct hvm_irq *irq;
     struct hvm_hw_vpic vpic[2]; /* 0=master; 1=slave */
-    struct hvm_vioapic *vioapic;
+    struct hvm_vioapic **vioapic;
+    unsigned int nr_vioapics;
     struct hvm_hw_stdvga stdvga;

     /*
diff --git a/xen/include/asm-x86/hvm/vioapic.h b/xen/include/asm-x86/hvm/vioapic.h
index bf81ef1c59..8ec91d2bd1 100644
--- a/xen/include/asm-x86/hvm/vioapic.h
+++ b/xen/include/asm-x86/hvm/vioapic.h
@@ -57,9 +57,12 @@ struct hvm_vioapic {
 };

 #define hvm_vioapic_size(cnt) offsetof(struct hvm_vioapic, redirtbl[cnt])
-#define domain_vioapic(d) ((d)->arch.hvm_domain.vioapic)
+#define domain_vioapic(d, i) ((d)->arch.hvm_domain.vioapic[i])
 #define vioapic_domain(v) ((v)->domain)

+struct hvm_vioapic *gsi_vioapic(const struct domain *d, unsigned int gsi,
+                                unsigned int *pin);
+
 int vioapic_init(struct domain *d);
 void vioapic_deinit(struct domain *d);
 void vioapic_reset(struct domain *d);
-- 
2.11.0 (Apple Git-81)
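[Editor's note] The core change above is that a GSI no longer maps 1:1 to a redirection-table index: gsi_vioapic() walks the per-domain array of vIO APICs and splits a GSI into a (vIO APIC, pin) pair, returning NULL for out-of-range GSIs (which callers such as pt_irq_vector() must now handle). The following is a minimal standalone C sketch of that lookup, using simplified stand-in structures (fake_vioapic, fake_domain, lookup_gsi are hypothetical names, not Xen definitions), intended only to illustrate the logic:

/* Standalone illustration of the GSI -> (vIO APIC, pin) lookup introduced
 * by this patch; the structures are simplified stand-ins, not Xen code. */
#include <stdio.h>

struct fake_vioapic {
    unsigned int nr_pins;
};

struct fake_domain {
    struct fake_vioapic **vioapic;   /* array of pointers, as in the patch */
    unsigned int nr_vioapics;
};

/* Mirrors gsi_vioapic(): find which vIO APIC a GSI belongs to and its pin. */
static struct fake_vioapic *lookup_gsi(const struct fake_domain *d,
                                       unsigned int gsi, unsigned int *pin)
{
    unsigned int i, base_gsi = 0;

    for ( i = 0; i < d->nr_vioapics; i++ )
    {
        struct fake_vioapic *vioapic = d->vioapic[i];

        if ( gsi >= base_gsi && gsi < base_gsi + vioapic->nr_pins )
        {
            *pin = gsi - base_gsi;
            return vioapic;
        }
        base_gsi += vioapic->nr_pins;
    }

    return NULL; /* out-of-range GSI: callers must check for this */
}

int main(void)
{
    /* Two hypothetical vIO APICs with 24 and 8 pins. */
    struct fake_vioapic a = { .nr_pins = 24 }, b = { .nr_pins = 8 };
    struct fake_vioapic *apics[] = { &a, &b };
    struct fake_domain d = { .vioapic = apics, .nr_vioapics = 2 };
    unsigned int pin;

    if ( lookup_gsi(&d, 27, &pin) == &b )
        printf("GSI 27 -> second vIO APIC, pin %u\n", pin); /* prints pin 3 */

    return 0;
}

Note that vioapic_init() in the patch still hard-codes nr_vioapics = 1, so behaviour is unchanged for existing guests; only the PVHv2 Dom0 work referenced in the commit message is expected to instantiate more than one vIO APIC.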