[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v2 3/7] x86/hvm: convert gsi_assert_count into a variable size array
>>> On 27.03.17 at 12:18, <roger.pau@xxxxxxxxxx> wrote: > Rearrange the fields of hvm_irq so that gsi_assert_count can be converted into > a variable size array and add a new field to account for the number of GSIs. > --- a/xen/arch/x86/hvm/hvm.c > +++ b/xen/arch/x86/hvm/hvm.c > @@ -457,7 +457,7 @@ void hvm_migrate_pirqs(struct vcpu *v) > { > struct domain *d = v->domain; > > - if ( !iommu_enabled || !d->arch.hvm_domain.irq.dpci ) > + if ( !iommu_enabled || !d->arch.hvm_domain.irq->dpci ) If the prior patch is really needed - why did it not convert this? (There are more such instances further down.) > @@ -122,7 +124,7 @@ void hvm_isa_irq_assert( > struct hvm_irq *hvm_irq = hvm_domain_irq(d); > unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); > > - ASSERT(isa_irq <= 15); > + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); > > spin_lock(&d->arch.hvm_domain.irq_lock); > > @@ -139,7 +141,7 @@ void hvm_isa_irq_deassert( > struct hvm_irq *hvm_irq = hvm_domain_irq(d); > unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); > > - ASSERT(isa_irq <= 15); > + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); Not sure about these two - perhaps they'd better be BUILD_BUG_ON()s for the DomU side, and Dom0 should never be allocated less than 16? 
> @@ -495,7 +497,7 @@ static void irq_dump(struct domain *d) > (uint32_t) hvm_irq->isa_irq.pad[0], > hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1], > hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]); > - for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 ) > + for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 ) > printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8 > " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n", > i, i+7, > @@ -507,6 +509,13 @@ static void irq_dump(struct domain *d) > hvm_irq->gsi_assert_count[i+5], > hvm_irq->gsi_assert_count[i+6], > hvm_irq->gsi_assert_count[i+7]); > + if ( i != hvm_irq->nr_gsis ) > + { > + printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1); > + for ( ; i < hvm_irq->nr_gsis; i++) > + printk(" %2.2"PRIu8, hvm_irq->gsi_assert_count[i]); > + printk("\n"); > + } I realize you've just copied this from existing code, but what advantage does %2.2 have over just %2 here? > @@ -545,6 +554,9 @@ static int irq_save_pci(struct domain *d, > hvm_domain_context_t *h) > unsigned int asserted, pdev, pintx; > int rc; > > + if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS ) > + return -EOPNOTSUPP; > + > spin_lock(&d->arch.hvm_domain.irq_lock); > > pdev = hvm_irq->callback_via.pci.dev; > @@ -592,6 +604,9 @@ static int irq_load_pci(struct domain *d, > hvm_domain_context_t *h) > struct hvm_irq *hvm_irq = hvm_domain_irq(d); > int link, dev, intx, gsi; > > + if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS ) > + return -EOPNOTSUPP; Why do you add these checks here? > --- a/xen/include/asm-x86/domain.h > +++ b/xen/include/asm-x86/domain.h > @@ -17,7 +17,7 @@ > #define is_pv_32bit_vcpu(v) (is_pv_32bit_domain((v)->domain)) > > #define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \ > - d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector) > + d->arch.hvm_domain.irq->callback_via_type == HVMIRQ_callback_vector) Please take the opportunity and properly parenthesize the use of d here. 
> @@ -89,13 +77,27 @@ struct hvm_irq { > u8 round_robin_prev_vcpu; > > struct hvm_irq_dpci *dpci; > + > + /* > + * Number of wires asserting each GSI. > + * > + * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space > + * except ISA IRQ 0, which is connected to GSI 2. > + * PCI links map into this space via the PCI-ISA bridge. > + * > + * GSIs 16+ are used only by PCI devices. The mapping from PCI device to > + * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16 > + */ > + unsigned int nr_gsis; > + u8 gsi_assert_count[]; > }; I'm afraid at the same time (or in a later patch) the type of this array (and maybe also the type of pci_link_assert_count[]) will need to be widened. Jan _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |