[Xen-devel] [PATCH for-4.9 v4 2/2] x86/vioapic: allow PVHv2 Dom0 to have more than one IO APIC
The base address, id and number of pins of the vIO APICs exposed to
PVHv2 Dom0 are the same as the values found on bare metal.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
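As a reading aid, not part of the patch: the dom0_build.c change sizes
the crafted MADT for nr_ioapics entries up front, then emits one IO APIC
structure per physical IO APIC, advancing a pointer past each entry so
the x2APIC entries land immediately after. The standalone sketch below
shows the same pattern; it is not Xen code, the struct layout is
simplified and all values are made up:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the ACPI MADT IO APIC entry (type 1). */
struct madt_io_apic {
    uint8_t type;
    uint8_t length;
    uint8_t id;
    uint8_t reserved;
    uint32_t address;
    uint32_t global_irq_base;
};

int main(void)
{
    /* Illustrative bare-metal values; Xen reads these from the host MADT. */
    unsigned int nr_ioapics = 2;
    uint8_t ids[] = { 8, 9 };
    uint32_t addrs[] = { 0xfec00000u, 0xfec01000u };
    uint32_t gsi_base[] = { 0, 24 };

    /* Size the buffer for all entries, as the patched sizing code does. */
    size_t size = sizeof(struct madt_io_apic) * nr_ioapics;
    struct madt_io_apic *io_apic = malloc(size), *entry = io_apic;

    if ( !io_apic )
        return 1;

    /* Emit one entry per IO APIC, bumping the pointer past each one. */
    for ( unsigned int i = 0; i < nr_ioapics; i++ )
    {
        entry->type = 1;                   /* ACPI_MADT_TYPE_IO_APIC */
        entry->length = sizeof(*entry);
        entry->id = ids[i];
        entry->address = addrs[i];
        entry->global_irq_base = gsi_base[i];
        entry++;
    }

    /* Whatever follows (x2APIC entries, etc.) starts where we stopped. */
    assert((size_t)((char *)entry - (char *)io_apic) == size);
    printf("emitted %u IO APIC entries\n", nr_ioapics);
    free(io_apic);
    return 0;
}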
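Also illustrative only: the hvm.c and vioapic.c changes together keep an
invariant; the hardware domain's GSI space is sized from the host's
nr_irqs_gsi, and the per-IO-APIC pin counts accumulated in vioapic_init()
must cover it exactly, which the new ASSERT checks. A minimal sketch of
that accounting, with stand-in values throughout:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the Xen constant; the real values come from host tables. */
#define NR_HVM_DOMU_IRQS 48u

int main(void)
{
    bool hardware_domain = true;                   /* PVHv2 Dom0 case */
    unsigned int nr_irqs_gsi = 48;                 /* host GSI count */
    unsigned int nr_ioapic_entries[] = { 24, 24 }; /* pins per IO APIC */
    unsigned int nr_ioapics = 2;

    /* hvm_domain_initialise(): size the GSI space up front. */
    unsigned int nr_gsis = hardware_domain ? nr_irqs_gsi : NR_HVM_DOMU_IRQS;

    /* vioapic_init(): accumulate pins per vIO APIC, then cross-check. */
    unsigned int nr_vioapics = hardware_domain ? nr_ioapics : 1;
    unsigned int sum = 0;
    for ( unsigned int i = 0; i < nr_vioapics; i++ )
        sum += hardware_domain ? nr_ioapic_entries[i] : NR_HVM_DOMU_IRQS;

    assert(sum == nr_gsis);                /* mirrors the patch's ASSERT */
    printf("nr_gsis=%u covered by %u vIO APIC(s)\n", nr_gsis, nr_vioapics);
    return 0;
}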
---
 xen/arch/x86/hvm/dom0_build.c | 33 ++++++++++++---------------------
 xen/arch/x86/hvm/hvm.c        |  8 +++++---
 xen/arch/x86/hvm/vioapic.c    | 28 ++++++++++++++++++++++------
 3 files changed, 39 insertions(+), 30 deletions(-)

diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index daa791d3f4..db9be87612 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -681,12 +681,7 @@ static int __init pvh_setup_acpi_madt(struct domain *d, paddr_t *addr)
     max_vcpus = dom0_max_vcpus();
     /* Calculate the size of the crafted MADT. */
     size = sizeof(*madt);
-    /*
-     * FIXME: the current vIO-APIC code just supports one IO-APIC instance
-     * per domain. This must be fixed in order to provide the same amount of
-     * IO APICs as available on bare metal.
-     */
-    size += sizeof(*io_apic);
+    size += sizeof(*io_apic) * nr_ioapics;
     size += sizeof(*intsrcovr) * acpi_intr_overrides;
     size += sizeof(*nmisrc) * acpi_nmi_sources;
     size += sizeof(*x2apic) * max_vcpus;
@@ -716,23 +711,19 @@ static int __init pvh_setup_acpi_madt(struct domain *d, paddr_t *addr)
      */
     madt->header.revision = min_t(unsigned char, table->revision, 4);
 
-    /*
-     * Setup the IO APIC entry.
-     * FIXME: the current vIO-APIC code just supports one IO-APIC instance
-     * per domain. This must be fixed in order to provide the same amount of
-     * IO APICs as available on bare metal, and with the same IDs as found in
-     * the native IO APIC MADT entries.
-     */
-    if ( nr_ioapics > 1 )
-        printk("WARNING: found %d IO APICs, Dom0 will only have access to 1 emulated IO APIC\n",
-               nr_ioapics);
+    /* Setup the IO APIC entries. */
     io_apic = (void *)(madt + 1);
-    io_apic->header.type = ACPI_MADT_TYPE_IO_APIC;
-    io_apic->header.length = sizeof(*io_apic);
-    io_apic->id = domain_vioapic(d, 0)->id;
-    io_apic->address = VIOAPIC_DEFAULT_BASE_ADDRESS;
+    for ( i = 0; i < nr_ioapics; i++ )
+    {
+        io_apic->header.type = ACPI_MADT_TYPE_IO_APIC;
+        io_apic->header.length = sizeof(*io_apic);
+        io_apic->id = domain_vioapic(d, i)->id;
+        io_apic->address = domain_vioapic(d, i)->base_address;
+        io_apic->global_irq_base = io_apic_gsi_base(i);
+        io_apic++;
+    }
 
-    x2apic = (void *)(io_apic + 1);
+    x2apic = (void *)io_apic;
     for ( i = 0; i < max_vcpus; i++ )
     {
         x2apic->header.type = ACPI_MADT_TYPE_LOCAL_X2APIC;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6c3c944abd..9a9732b308 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -595,6 +595,7 @@ static int hvm_print_line(
 
 int hvm_domain_initialise(struct domain *d)
 {
+    unsigned int nr_gsis;
     int rc;
 
     if ( !hvm_enabled )
@@ -616,19 +617,20 @@ int hvm_domain_initialise(struct domain *d)
     if ( rc != 0 )
         goto fail0;
 
+    nr_gsis = is_hardware_domain(d) ? nr_irqs_gsi : NR_HVM_DOMU_IRQS;
     d->arch.hvm_domain.pl_time = xzalloc(struct pl_time);
     d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
     d->arch.hvm_domain.io_handler = xzalloc_array(struct hvm_io_handler,
                                                   NR_IO_HANDLERS);
-    d->arch.hvm_domain.irq = xzalloc_bytes(hvm_irq_size(NR_HVM_DOMU_IRQS));
+    d->arch.hvm_domain.irq = xzalloc_bytes(hvm_irq_size(nr_gsis));
 
     rc = -ENOMEM;
     if ( !d->arch.hvm_domain.pl_time || !d->arch.hvm_domain.irq ||
          !d->arch.hvm_domain.params  || !d->arch.hvm_domain.io_handler )
         goto fail1;
 
-    /* Set the default number of GSIs */
-    hvm_domain_irq(d)->nr_gsis = NR_HVM_DOMU_IRQS;
+    /* Set the number of GSIs */
+    hvm_domain_irq(d)->nr_gsis = nr_gsis;
 
     BUILD_BUG_ON(NR_HVM_DOMU_IRQS < NR_ISAIRQS);
     ASSERT(hvm_domain_irq(d)->nr_gsis >= NR_ISAIRQS);
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 0badb055d8..8775aa46df 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -539,10 +539,19 @@ void vioapic_reset(struct domain *d)
         memset(vioapic, 0, hvm_vioapic_size(nr_pins));
         for ( pin = 0; pin < nr_pins; pin++ )
             vioapic->redirtbl[pin].fields.mask = 1;
-        ASSERT(!i);
-        vioapic->base_address = VIOAPIC_DEFAULT_BASE_ADDRESS +
-                                VIOAPIC_MEM_LENGTH * 0;
-        vioapic->id = 0;
+
+        if ( !is_hardware_domain(d) )
+        {
+            ASSERT(!i);
+            vioapic->base_address = VIOAPIC_DEFAULT_BASE_ADDRESS +
+                                    VIOAPIC_MEM_LENGTH * 0;
+            vioapic->id = 0;
+        }
+        else
+        {
+            vioapic->base_address = mp_ioapics[i].mpc_apicaddr;
+            vioapic->id = mp_ioapics[i].mpc_apicid;
+        }
         vioapic->nr_pins = nr_pins;
         vioapic->domain = d;
     }
@@ -559,8 +568,7 @@ static void vioapic_free(const struct domain *d, unsigned int nr_vioapics)
 
 int vioapic_init(struct domain *d)
 {
-    unsigned int i, nr_vioapics = 1;
-    unsigned int nr_pins = ARRAY_SIZE(domain_vioapic(d, 0)->domU.redirtbl);
+    unsigned int i, nr_vioapics, nr_gsis = 0;
 
     if ( !has_vioapic(d) )
     {
@@ -568,6 +576,8 @@ int vioapic_init(struct domain *d)
         return 0;
     }
 
+    nr_vioapics = is_hardware_domain(d) ? nr_ioapics : 1;
+
     if ( (d->arch.hvm_domain.vioapic == NULL) &&
          ((d->arch.hvm_domain.vioapic =
            xzalloc_array(struct hvm_vioapic *, nr_vioapics)) == NULL) )
@@ -575,6 +585,9 @@ int vioapic_init(struct domain *d)
 
     for ( i = 0; i < nr_vioapics; i++ )
     {
+        unsigned int nr_pins = is_hardware_domain(d) ? nr_ioapic_entries[i] :
+                               ARRAY_SIZE(domain_vioapic(d, 0)->domU.redirtbl);
+
         if ( (domain_vioapic(d, i) =
               xmalloc_bytes(hvm_vioapic_size(nr_pins))) == NULL )
         {
@@ -582,8 +595,11 @@ int vioapic_init(struct domain *d)
             return -ENOMEM;
         }
         domain_vioapic(d, i)->nr_pins = nr_pins;
+        nr_gsis += nr_pins;
     }
 
+    ASSERT(hvm_domain_irq(d)->nr_gsis == nr_gsis);
+
     d->arch.hvm_domain.nr_vioapics = nr_vioapics;
     vioapic_reset(d);
 
-- 
2.11.0 (Apple Git-81)