[Xen-devel] [PATCH for-4.9 v2] x86/vioapic: allow holes in the GSI range for PVH Dom0
The current vIO APIC code for PVH Dom0 doesn't allow non-contiguous GSIs,
which means that all GSIs must belong to an IO APIC. This doesn't match
reality, where there are systems with non-contiguous GSIs.

In order to fix this, add a base_gsi field to each hvm_vioapic struct to
store the base GSI of each emulated IO APIC. For PVH Dom0 those values are
populated from the hardware ones.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Reported-by: Chao Gao <chao.gao@xxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Chao Gao <chao.gao@xxxxxxxxx>
---
Changes since v1:
 - Calculate the maximum GSI in vioapic_init taking into account the base
   GSI of each IO APIC.
 - Use the vioapic base_gsi field in the PVH Dom0 build in order to
   populate the MADT IO APIC entries.

I think this is a good candidate to be included in 4.9: it's a bugfix, and
the code is only used by PVH Dom0, so the risk is low IMHO.
---
 xen/arch/x86/hvm/dom0_build.c     |  2 +-
 xen/arch/x86/hvm/vioapic.c        | 45 +++++++++++++++++----------------------
 xen/include/asm-x86/hvm/vioapic.h |  1 +
 3 files changed, 21 insertions(+), 27 deletions(-)

diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index db9be87612..020c355faf 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -719,7 +719,7 @@ static int __init pvh_setup_acpi_madt(struct domain *d, paddr_t *addr)
         io_apic->header.length = sizeof(*io_apic);
         io_apic->id = domain_vioapic(d, i)->id;
         io_apic->address = domain_vioapic(d, i)->base_address;
-        io_apic->global_irq_base = io_apic_gsi_base(i);
+        io_apic->global_irq_base = domain_vioapic(d, i)->base_gsi;
         io_apic++;
     }
 
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 5157db7a4e..c511fa7db6 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -64,37 +64,23 @@ static struct hvm_vioapic *addr_vioapic(const struct domain *d,
 struct hvm_vioapic *gsi_vioapic(const struct domain *d, unsigned int gsi,
                                 unsigned int *pin)
 {
-    unsigned int i, base_gsi = 0;
+    unsigned int i;
 
     for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
     {
         struct hvm_vioapic *vioapic = domain_vioapic(d, i);
 
-        if ( gsi >= base_gsi && gsi < base_gsi + vioapic->nr_pins )
+        if ( gsi >= vioapic->base_gsi &&
+             gsi < vioapic->base_gsi + vioapic->nr_pins )
         {
-            *pin = gsi - base_gsi;
+            *pin = gsi - vioapic->base_gsi;
             return vioapic;
         }
-
-        base_gsi += vioapic->nr_pins;
     }
 
     return NULL;
 }
 
-static unsigned int base_gsi(const struct domain *d,
-                             const struct hvm_vioapic *vioapic)
-{
-    unsigned int nr_vioapics = d->arch.hvm_domain.nr_vioapics;
-    unsigned int base_gsi = 0, i = 0;
-    const struct hvm_vioapic *tmp;
-
-    while ( i < nr_vioapics && (tmp = domain_vioapic(d, i++)) != vioapic )
-        base_gsi += tmp->nr_pins;
-
-    return base_gsi;
-}
-
 static uint32_t vioapic_read_indirect(const struct hvm_vioapic *vioapic)
 {
     uint32_t result = 0;
@@ -180,7 +166,7 @@ static void vioapic_write_redirent(
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     union vioapic_redir_entry *pent, ent;
     int unmasked = 0;
-    unsigned int gsi = base_gsi(d, vioapic) + idx;
+    unsigned int gsi = vioapic->base_gsi + idx;
 
     spin_lock(&d->arch.hvm_domain.irq_lock);
 
@@ -340,7 +326,7 @@ static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
     struct domain *d = vioapic_domain(vioapic);
     struct vlapic *target;
     struct vcpu *v;
-    unsigned int irq = base_gsi(d, vioapic) + pin;
+    unsigned int irq = vioapic->base_gsi + pin;
 
     ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));
 
@@ -451,7 +437,7 @@ void vioapic_update_EOI(struct domain *d, u8 vector)
 {
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     union vioapic_redir_entry *ent;
-    unsigned int i, base_gsi = 0;
+    unsigned int i;
 
     ASSERT(has_vioapic(d));
 
@@ -473,19 +459,18 @@ void vioapic_update_EOI(struct domain *d, u8 vector)
             if ( iommu_enabled )
             {
                 spin_unlock(&d->arch.hvm_domain.irq_lock);
-                hvm_dpci_eoi(d, base_gsi + pin, ent);
+                hvm_dpci_eoi(d, vioapic->base_gsi + pin, ent);
                 spin_lock(&d->arch.hvm_domain.irq_lock);
             }
 
             if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
                  !ent->fields.mask &&
-                 hvm_irq->gsi_assert_count[base_gsi + pin] )
+                 hvm_irq->gsi_assert_count[vioapic->base_gsi + pin] )
             {
                 ent->fields.remote_irr = 1;
                 vioapic_deliver(vioapic, pin);
             }
         }
-        base_gsi += vioapic->nr_pins;
     }
 
     spin_unlock(&d->arch.hvm_domain.irq_lock);
@@ -554,6 +539,7 @@ void vioapic_reset(struct domain *d)
         {
             vioapic->base_address = mp_ioapics[i].mpc_apicaddr;
             vioapic->id = mp_ioapics[i].mpc_apicid;
+            vioapic->base_gsi = io_apic_gsi_base(i);
         }
         vioapic->nr_pins = nr_pins;
         vioapic->domain = d;
@@ -590,6 +576,8 @@ int vioapic_init(struct domain *d)
     {
         unsigned int nr_pins = is_hardware_domain(d) ? nr_ioapic_entries[i] :
             ARRAY_SIZE(domain_vioapic(d, 0)->domU.redirtbl);
+        unsigned int base_gsi = is_hardware_domain(d) ? io_apic_gsi_base(i)
+                                                      : 0;
 
         if ( (domain_vioapic(d, i) =
               xmalloc_bytes(hvm_vioapic_size(nr_pins))) == NULL )
@@ -598,10 +586,15 @@ int vioapic_init(struct domain *d)
             return -ENOMEM;
         }
         domain_vioapic(d, i)->nr_pins = nr_pins;
-        nr_gsis += nr_pins;
+        nr_gsis = max(nr_gsis, base_gsi + nr_pins);
     }
 
-    ASSERT(hvm_domain_irq(d)->nr_gsis == nr_gsis);
+    /*
+     * NB: hvm_domain_irq(d)->nr_gsis is actually the highest GSI + 1, but
+     * there might be holes in this range (ie: GSIs that don't belong to any
+     * vIO APIC).
+     */
+    ASSERT(hvm_domain_irq(d)->nr_gsis >= nr_gsis);
 
     d->arch.hvm_domain.nr_vioapics = nr_vioapics;
     vioapic_reset(d);
diff --git a/xen/include/asm-x86/hvm/vioapic.h b/xen/include/asm-x86/hvm/vioapic.h
index 8ec91d2bd1..2ceb60eaef 100644
--- a/xen/include/asm-x86/hvm/vioapic.h
+++ b/xen/include/asm-x86/hvm/vioapic.h
@@ -50,6 +50,7 @@
 struct hvm_vioapic {
     struct domain *domain;
     uint32_t nr_pins;
+    unsigned int base_gsi;
     union {
         XEN_HVM_VIOAPIC(,);
         struct hvm_hw_vioapic domU;
-- 
2.11.0 (Apple Git-81)
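As an illustrative aside, the lookup scheme this patch relies on can be
condensed into a minimal standalone C sketch. The structures and values below
are simplified stand-ins rather than the real Xen types; they only show how
storing a base GSI per IO APIC lets a lookup tolerate holes, i.e. GSIs that
belong to no IO APIC:

/* Standalone illustration (not Xen code): GSI lookup with holes. */
#include <stddef.h>
#include <stdio.h>

struct vioapic {
    unsigned int base_gsi;  /* first GSI handled by this IO APIC */
    unsigned int nr_pins;   /* number of pins, i.e. GSIs it covers */
};

/* Two IO APICs with a hole: GSIs 24-31 belong to neither of them. */
static struct vioapic vioapics[] = {
    { .base_gsi = 0,  .nr_pins = 24 },  /* GSIs  0..23 */
    { .base_gsi = 32, .nr_pins = 8  },  /* GSIs 32..39 */
};

/* Return the IO APIC covering gsi and its pin, or NULL for a hole. */
static struct vioapic *gsi_to_vioapic(unsigned int gsi, unsigned int *pin)
{
    for ( size_t i = 0; i < sizeof(vioapics) / sizeof(vioapics[0]); i++ )
    {
        struct vioapic *v = &vioapics[i];

        if ( gsi >= v->base_gsi && gsi < v->base_gsi + v->nr_pins )
        {
            *pin = gsi - v->base_gsi;
            return v;
        }
    }

    return NULL;
}

int main(void)
{
    static const unsigned int gsis[] = { 5, 28, 35 };

    for ( size_t i = 0; i < sizeof(gsis) / sizeof(gsis[0]); i++ )
    {
        unsigned int pin;
        struct vioapic *v = gsi_to_vioapic(gsis[i], &pin);

        if ( v )
            printf("GSI %u -> IO APIC with base GSI %u, pin %u\n",
                   gsis[i], v->base_gsi, pin);
        else
            printf("GSI %u -> hole (no IO APIC)\n", gsis[i]);
    }

    return 0;
}

With the sample tables above, GSI 28 falls into the hole and the lookup
returns NULL, which is exactly the case the patched gsi_vioapic() now handles
for hardware layouts with non-contiguous GSIs.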