Re: [Xen-devel] [PATCH 2/4] pass struct irq_desc * to set_affinity() IRQ accessors
On 20/09/11 16:43, Jan Beulich wrote:
> This is because the descriptor is generally more useful (with the IRQ
> number being accessible through it if necessary) and going forward
> will hopefully allow us to remove all direct accesses to the IRQ
> descriptor array, in turn making it possible to replace the array
> with some other, more efficient data structure.

Do be aware that certain bits of the code use pointer arithmetic
between an irq_{cfg,desc} * and the base of the relevant array to work
out the IRQ number.  Changing the data structure will likely invalidate
that logic in subtle ways.  For example:
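(A minimal sketch of the fragile idiom, assuming the current flat
irq_desc[] array; the structure contents and the desc_to_irq() name are
illustrative, not the actual Xen code.)

    /* Sketch only: stand-ins for the real Xen structures. */
    struct irq_desc {
        unsigned int status;
        /* ... */
    };

    #define NR_IRQS 256
    static struct irq_desc irq_desc[NR_IRQS];  /* one flat array, as today */

    /* Recover the IRQ number from a descriptor pointer by its position
     * in the array.  This is only valid while descriptors live inline
     * in a single flat array; switch to a tree, or to an array of
     * pointers to separately allocated descriptors, and the
     * subtraction silently returns garbage. */
    static unsigned int desc_to_irq(const struct irq_desc *desc)
    {
        return desc - irq_desc;
    }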
~Andrew

> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
>
> --- a/xen/arch/ia64/linux-xen/iosapic.c
> +++ b/xen/arch/ia64/linux-xen/iosapic.c
> @@ -352,18 +352,18 @@ unmask_irq (unsigned int irq)
>
>
>  static void
> -iosapic_set_affinity (unsigned int irq, const cpumask_t *mask)
> +iosapic_set_affinity (struct irq_desc *desc, const cpumask_t *mask)
>  {
>  #ifdef CONFIG_SMP
>         unsigned long flags;
>         u32 high32, low32;
>         int dest, rte_index;
>         char __iomem *addr;
> -       int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
> +       int redir = (desc->irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
> +       unsigned int irq = desc->irq & ~IA64_IRQ_REDIRECTED;
>         ia64_vector vec;
>         struct iosapic_rte_info *rte;
>
> -       irq &= (~IA64_IRQ_REDIRECTED);
>         vec = irq_to_vector(irq);
>
>         if (cpumask_empty(mask))
> --- a/xen/arch/x86/hpet.c
> +++ b/xen/arch/x86/hpet.c
> @@ -258,24 +258,14 @@ static void hpet_msi_mask(unsigned int i
>      hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
>  }
>
> -static void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
> +static void hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg)
>  {
> -    unsigned int ch_idx = irq_to_channel(irq);
> -    struct hpet_event_channel *ch = hpet_events + ch_idx;
> -
> -    BUG_ON(ch_idx >= num_hpets_used);
> -
>      hpet_write32(msg->data, HPET_Tn_ROUTE(ch->idx));
>      hpet_write32(msg->address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
>  }
>
> -static void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
> +static void hpet_msi_read(struct hpet_event_channel *ch, struct msi_msg *msg)
>  {
> -    unsigned int ch_idx = irq_to_channel(irq);
> -    struct hpet_event_channel *ch = hpet_events + ch_idx;
> -
> -    BUG_ON(ch_idx >= num_hpets_used);
> -
>      msg->data = hpet_read32(HPET_Tn_ROUTE(ch->idx));
>      msg->address_lo = hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4);
>      msg->address_hi = 0;
> @@ -305,23 +295,22 @@ static void hpet_msi_end(unsigned int ir
>  {
>  }
>
> -static void hpet_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
> +static void hpet_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
>  {
>      struct msi_msg msg;
>      unsigned int dest;
> -    struct irq_desc * desc = irq_to_desc(irq);
>      struct irq_cfg *cfg= desc->chip_data;
>
>      dest = set_desc_affinity(desc, mask);
>      if (dest == BAD_APICID)
>          return;
>
> -    hpet_msi_read(irq, &msg);
> +    hpet_msi_read(desc->action->dev_id, &msg);
>      msg.data &= ~MSI_DATA_VECTOR_MASK;
>      msg.data |= MSI_DATA_VECTOR(cfg->vector);
>      msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
>      msg.address_lo |= MSI_ADDR_DEST_ID(dest);
> -    hpet_msi_write(irq, &msg);
> +    hpet_msi_write(desc->action->dev_id, &msg);
>  }
>
>  /*
> @@ -338,12 +327,12 @@ static hw_irq_controller hpet_msi_type =
>      .set_affinity = hpet_msi_set_affinity,
>  };
>
> -static void __hpet_setup_msi_irq(unsigned int irq)
> +static void __hpet_setup_msi_irq(struct irq_desc *desc)
>  {
>      struct msi_msg msg;
>
> -    msi_compose_msg(irq, &msg);
> -    hpet_msi_write(irq, &msg);
> +    msi_compose_msg(desc->irq, &msg);
> +    hpet_msi_write(desc->action->dev_id, &msg);
>  }
>
>  static int __init hpet_setup_msi_irq(unsigned int irq)
> @@ -357,7 +346,7 @@ static int __init hpet_setup_msi_irq(uns
>      if ( ret < 0 )
>          return ret;
>
> -    __hpet_setup_msi_irq(irq);
> +    __hpet_setup_msi_irq(desc);
>
>      return 0;
>  }
> @@ -471,7 +460,7 @@ static void hpet_attach_channel(unsigned
>      if ( ch->cpu != cpu )
>          return;
>
> -    hpet_msi_set_affinity(ch->irq, cpumask_of(ch->cpu));
> +    hpet_msi_set_affinity(irq_to_desc(ch->irq), cpumask_of(ch->cpu));
>  }
>
>  static void hpet_detach_channel(unsigned int cpu,
> @@ -493,7 +482,7 @@ static void hpet_detach_channel(unsigned
>      }
>
>      ch->cpu = first_cpu(ch->cpumask);
> -    hpet_msi_set_affinity(ch->irq, cpumask_of(ch->cpu));
> +    hpet_msi_set_affinity(irq_to_desc(ch->irq), cpumask_of(ch->cpu));
>  }
>
>  #include <asm/mc146818rtc.h>
> @@ -619,7 +608,7 @@ void hpet_broadcast_resume(void)
>      for ( i = 0; i < n; i++ )
>      {
>          if ( hpet_events[i].irq >= 0 )
> -            __hpet_setup_msi_irq(hpet_events[i].irq);
> +            __hpet_setup_msi_irq(irq_to_desc(hpet_events[i].irq));
>
>          /* set HPET Tn as oneshot */
>          cfg = hpet_read32(HPET_Tn_CFG(hpet_events[i].idx));
> --- a/xen/arch/x86/io_apic.c
> +++ b/xen/arch/x86/io_apic.c
> @@ -658,7 +658,7 @@ unsigned int set_desc_affinity(struct ir
>  }
>
>  static void
> -set_ioapic_affinity_irq_desc(struct irq_desc *desc, const cpumask_t *mask)
> +set_ioapic_affinity_irq(struct irq_desc *desc, const cpumask_t *mask)
>  {
>      unsigned long flags;
>      unsigned int dest;
> @@ -695,16 +695,6 @@ set_ioapic_affinity_irq_desc(struct irq_
>      spin_unlock_irqrestore(&ioapic_lock, flags);
>
>  }
> -
> -static void
> -set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
> -{
> -    struct irq_desc *desc;
> -
> -    desc = irq_to_desc(irq);
> -
> -    set_ioapic_affinity_irq_desc(desc, mask);
> -}
>  #endif /* CONFIG_SMP */
>
>  /*
> @@ -802,7 +792,7 @@ void /*__init*/ setup_ioapic_dest(void)
>              irq = pin_2_irq(irq_entry, ioapic, pin);
>              cfg = irq_cfg(irq);
>              BUG_ON(cpus_empty(cfg->cpu_mask));
> -            set_ioapic_affinity_irq(irq, &cfg->cpu_mask);
> +            set_ioapic_affinity_irq(irq_to_desc(irq), &cfg->cpu_mask);
>          }
>
>  }
> @@ -1780,7 +1770,7 @@ static void mask_and_ack_level_ioapic_ir
>
>      if ((irq_desc[irq].status & IRQ_MOVE_PENDING) &&
>          !io_apic_level_ack_pending(irq))
> -        move_masked_irq(irq);
> +        move_masked_irq(desc);
>
>      if ( !(v & (1 << (i & 0x1f))) ) {
>          spin_lock(&ioapic_lock);
> @@ -1799,7 +1789,9 @@ static void end_level_ioapic_irq (unsign
>  {
>      if ( directed_eoi_enabled )
>      {
> -        if ( !(irq_desc[irq].status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) )
> +        struct irq_desc *desc = irq_to_desc(irq);
> +
> +        if ( !(desc->status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) )
>          {
>              eoi_IO_APIC_irq(irq);
>              return;
> @@ -1807,9 +1799,9 @@ static void end_level_ioapic_irq (unsign
>
>          mask_IO_APIC_irq(irq);
>          eoi_IO_APIC_irq(irq);
> -        if ( (irq_desc[irq].status & IRQ_MOVE_PENDING) &&
> +        if ( (desc->status & IRQ_MOVE_PENDING) &&
>              !io_apic_level_ack_pending(irq) )
> -            move_masked_irq(irq);
> +            move_masked_irq(desc);
>      }
>
>      if ( !(irq_desc[irq].status & IRQ_DISABLED) )
> --- a/xen/arch/x86/irq.c
> +++ b/xen/arch/x86/irq.c
> @@ -557,10 +557,8 @@ void __setup_vector_irq(int cpu)
>      }
>  }
>
> -void move_masked_irq(int irq)
> +void move_masked_irq(struct irq_desc *desc)
>  {
> -    struct irq_desc *desc = irq_to_desc(irq);
> -
>      if (likely(!(desc->status & IRQ_MOVE_PENDING)))
>          return;
>
> @@ -582,7 +580,7 @@ void move_masked_irq(int irq)
>       * For correct operation this depends on the caller masking the irqs.
>       */
>      if (likely(cpus_intersects(desc->pending_mask, cpu_online_map)))
> -        desc->handler->set_affinity(irq, &desc->pending_mask);
> +        desc->handler->set_affinity(desc, &desc->pending_mask);
>
>      cpus_clear(desc->pending_mask);
>  }
> @@ -598,7 +596,7 @@ void move_native_irq(int irq)
>          return;
>
>      desc->handler->disable(irq);
> -    move_masked_irq(irq);
> +    move_masked_irq(desc);
>      desc->handler->enable(irq);
>  }
>
> @@ -1409,7 +1407,7 @@ int pirq_guest_bind(struct vcpu *v, stru
>          /* Attempt to bind the interrupt target to the correct CPU. */
>          cpu_set(v->processor, cpumask);
>          if ( !opt_noirqbalance && (desc->handler->set_affinity != NULL) )
> -            desc->handler->set_affinity(irq, &cpumask);
> +            desc->handler->set_affinity(desc, &cpumask);
>      }
>      else if ( !will_share || !action->shareable )
>      {
> @@ -1963,7 +1961,7 @@ void fixup_irqs(void)
>              desc->handler->disable(irq);
>
>          if ( desc->handler->set_affinity )
> -            desc->handler->set_affinity(irq, &affinity);
> +            desc->handler->set_affinity(desc, &affinity);
>          else if ( !(warned++) )
>              set_affinity = 0;
>
> --- a/xen/arch/x86/msi.c
> +++ b/xen/arch/x86/msi.c
> @@ -266,11 +266,10 @@ static void write_msi_msg(struct msi_des
>      }
>  }
>
> -void set_msi_affinity(unsigned int irq, const cpumask_t *mask)
> +void set_msi_affinity(struct irq_desc *desc, const cpumask_t *mask)
>  {
>      struct msi_msg msg;
>      unsigned int dest;
> -    struct irq_desc *desc = irq_to_desc(irq);
>      struct msi_desc *msi_desc = desc->msi_desc;
>      struct irq_cfg *cfg = desc->chip_data;
>
> --- a/xen/drivers/passthrough/amd/iommu_init.c
> +++ b/xen/drivers/passthrough/amd/iommu_init.c
> @@ -344,12 +344,11 @@ static void amd_iommu_reset_event_log(st
>      set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
>  }
>
> -static void iommu_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
> +static void iommu_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
>  {
>      struct msi_msg msg;
>      unsigned int dest;
> -    struct amd_iommu *iommu = irq_to_iommu[irq];
> -    struct irq_desc *desc = irq_to_desc(irq);
> +    struct amd_iommu *iommu = desc->action->dev_id;
>      struct irq_cfg *cfg = desc->chip_data;
>      u8 bus = (iommu->bdf >> 8) & 0xff;
>      u8 dev = PCI_SLOT(iommu->bdf & 0xff);
> @@ -591,7 +590,7 @@ static void enable_iommu(struct amd_iomm
>      register_iommu_event_log_in_mmio_space(iommu);
>      register_iommu_exclusion_range(iommu);
>
> -    iommu_msi_set_affinity(iommu->irq, &cpu_online_map);
> +    iommu_msi_set_affinity(irq_to_desc(iommu->irq), &cpu_online_map);
>      amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
>
>      set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -998,14 +998,12 @@ static void dma_msi_end(unsigned int irq
>      ack_APIC_irq();
>  }
>
> -static void dma_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
> +static void dma_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
>  {
>      struct msi_msg msg;
>      unsigned int dest;
>      unsigned long flags;
> -
> -    struct iommu *iommu = irq_to_iommu[irq];
> -    struct irq_desc *desc = irq_to_desc(irq);
> +    struct iommu *iommu = desc->action->dev_id;
>      struct irq_cfg *cfg = desc->chip_data;
>
>  #ifdef CONFIG_X86
> @@ -1984,7 +1982,7 @@ static int init_vtd_hw(void)
>          iommu = drhd->iommu;
>
>          cfg = irq_cfg(iommu->irq);
> -        dma_msi_set_affinity(iommu->irq, &cfg->cpu_mask);
> +        dma_msi_set_affinity(irq_to_desc(iommu->irq), &cfg->cpu_mask);
>
>          clear_fault_bits(iommu);
>
> --- a/xen/include/asm-x86/irq.h
> +++ b/xen/include/asm-x86/irq.h
> @@ -172,7 +172,7 @@ void unlock_vector_lock(void);
>  void __setup_vector_irq(int cpu);
>
>  void move_native_irq(int irq);
> -void move_masked_irq(int irq);
> +void move_masked_irq(struct irq_desc *);
>
>  int __assign_irq_vector(int irq, struct irq_cfg *, const cpumask_t *);
>
> --- a/xen/include/asm-x86/msi.h
> +++ b/xen/include/asm-x86/msi.h
> @@ -77,7 +77,7 @@ struct msi_desc;
>  /* Helper functions */
>  extern void mask_msi_irq(unsigned int irq);
>  extern void unmask_msi_irq(unsigned int irq);
> -extern void set_msi_affinity(unsigned int vector, const cpumask_t *);
> +extern void set_msi_affinity(struct irq_desc *, const cpumask_t *);
>  extern int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc);
>  extern void pci_disable_msi(struct msi_desc *desc);
>  extern void pci_cleanup_msi(struct pci_dev *pdev);
> --- a/xen/include/xen/irq.h
> +++ b/xen/include/xen/irq.h
> @@ -33,6 +33,8 @@ struct irqaction {
>  #define NEVER_ASSIGN_IRQ    (-2)
>  #define FREE_TO_ASSIGN_IRQ  (-3)
>
> +struct irq_desc;
> +
>  /*
>   * Interrupt controller descriptor. This is all we need
>   * to describe about the low-level hardware.
> @@ -45,7 +47,7 @@ struct hw_interrupt_type {
>      void (*disable)(unsigned int irq);
>      void (*ack)(unsigned int irq);
>      void (*end)(unsigned int irq, u8 vector);
> -    void (*set_affinity)(unsigned int irq, const cpumask_t *);
> +    void (*set_affinity)(struct irq_desc *, const cpumask_t *);
>  };
>
>  typedef const struct hw_interrupt_type hw_irq_controller;
>

--
Andrew Cooper - Dom0 Kernel Engineer, Citrix XenServer
T: +44 (0)1223 225 900, http://www.citrix.com