x86/IRQ: eliminate irq_vector[]

The vector is already being tracked in struct irq_desc's arch.vector
member, so there's no real need for a second place for it to get
stored. The only caveat is that legacy vectors (used for interrupts
handled through the 8259) must be special-cased so that they don't
prevent non-legacy vectors from being assigned.

Signed-off-by: Jan Beulich

--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -452,10 +452,10 @@ static void unmask_IO_APIC_irq(struct ir
     spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void __eoi_IO_APIC_irq(unsigned int irq)
+static void __eoi_IO_APIC_irq(struct irq_desc *desc)
 {
-    struct irq_pin_list *entry = irq_2_pin + irq;
-    unsigned int pin, vector = IO_APIC_VECTOR(irq);
+    struct irq_pin_list *entry = irq_2_pin + desc->irq;
+    unsigned int pin, vector = desc->arch.vector;
 
     for (;;) {
         pin = entry->pin;
@@ -468,11 +468,11 @@ static void __eoi_IO_APIC_irq(unsigned i
     }
 }
 
-static void eoi_IO_APIC_irq(unsigned int irq)
+static void eoi_IO_APIC_irq(struct irq_desc *desc)
 {
     unsigned long flags;
 
     spin_lock_irqsave(&ioapic_lock, flags);
-    __eoi_IO_APIC_irq(irq);
+    __eoi_IO_APIC_irq(desc);
     spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
@@ -1200,7 +1200,7 @@ static void /*__init*/ __print_IO_APIC(v
         struct irq_pin_list *entry = irq_2_pin + i;
         if (entry->pin < 0)
             continue;
-        printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
+        printk(KERN_DEBUG "IRQ%d ", irq_to_desc(i)->arch.vector);
         for (;;) {
             printk("-> %d:%d", entry->apic, entry->pin);
             if (!entry->next)
@@ -1621,7 +1621,7 @@ static void mask_and_ack_level_ioapic_ir
      * operation to prevent an edge-triggered interrupt escaping meanwhile.
      * The idea is from Manfred Spraul.  --macro
      */
-    i = IO_APIC_VECTOR(desc->irq);
+    i = desc->arch.vector;
 
     v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
 
@@ -1653,12 +1653,12 @@ static void end_level_ioapic_irq(struct
 {
     if ( !(desc->status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) )
     {
-        eoi_IO_APIC_irq(desc->irq);
+        eoi_IO_APIC_irq(desc);
         return;
     }
 
     mask_IO_APIC_irq(desc);
-    eoi_IO_APIC_irq(desc->irq);
+    eoi_IO_APIC_irq(desc);
     if ( (desc->status & IRQ_MOVE_PENDING) &&
          !io_apic_level_ack_pending(desc->irq) )
         move_masked_irq(desc);
@@ -1689,7 +1689,7 @@ static void end_level_ioapic_irq(struct
      * operation to prevent an edge-triggered interrupt escaping meanwhile.
      * The idea is from Manfred Spraul.  --macro
      */
-    i = IO_APIC_VECTOR(desc->irq);
+    i = desc->arch.vector;
 
     /* Manually EOI the old vector if we are moving to the new */
     if ( vector && i != vector )
@@ -1752,7 +1752,7 @@ static inline void init_IO_APIC_traps(vo
     int irq;
     /* Xen: This is way simpler than the Linux implementation.
      */
     for (irq = 0; platform_legacy_irq(irq); irq++)
-        if (IO_APIC_IRQ(irq) && !IO_APIC_VECTOR(irq))
+        if (IO_APIC_IRQ(irq) && !irq_to_vector(irq))
             make_8259A_irq(irq);
 }
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -41,7 +41,6 @@ custom_param("irq_vector_map", parse_irq
 
 vmask_t global_used_vector_map;
 
-u8 __read_mostly *irq_vector;
 struct irq_desc __read_mostly *irq_desc = NULL;
 
 static DECLARE_BITMAP(used_vectors, NR_VECTORS);
@@ -135,8 +134,6 @@ static int __init __bind_irq_vector(int
         set_bit(vector, desc->arch.used_vectors);
     }
     desc->arch.used = IRQ_USED;
-    if (IO_APIC_IRQ(irq))
-        irq_vector[irq] = vector;
     return 0;
 }
 
@@ -289,7 +286,11 @@ int irq_to_vector(int irq)
     BUG_ON(irq >= nr_irqs || irq < 0);
 
     if (IO_APIC_IRQ(irq))
-        vector = irq_vector[irq];
+    {
+        vector = irq_to_desc(irq)->arch.vector;
+        if (vector >= FIRST_LEGACY_VECTOR && vector <= LAST_LEGACY_VECTOR)
+            vector = 0;
+    }
     else if (MSI_IRQ(irq))
         vector = irq_to_desc(irq)->arch.vector;
     else
@@ -331,9 +332,8 @@ int __init init_irq_data(void)
         this_cpu(vector_irq)[vector] = -1;
 
     irq_desc = xzalloc_array(struct irq_desc, nr_irqs);
-    irq_vector = xzalloc_array(u8, nr_irqs_gsi);
 
-    if ( !irq_desc || !irq_vector )
+    if ( !irq_desc )
         return -ENOMEM;
 
     for (irq = 0; irq < nr_irqs_gsi; irq++) {
@@ -426,7 +426,7 @@ static int __assign_irq_vector(
     vmask_t *irq_used_vectors = NULL;
 
     old_vector = irq_to_vector(irq);
-    if (old_vector) {
+    if (old_vector > 0) {
         cpumask_and(&tmp_mask, mask, &cpu_online_map);
         if (cpumask_intersects(&tmp_mask, desc->arch.cpu_mask)) {
             desc->arch.vector = old_vector;
@@ -485,7 +485,7 @@ next:
         /* Found one! */
         current_vector = vector;
         current_offset = offset;
-        if (old_vector) {
+        if (old_vector > 0) {
             desc->arch.move_in_progress = 1;
             cpumask_copy(desc->arch.old_cpu_mask, desc->arch.cpu_mask);
             desc->arch.old_vector = desc->arch.vector;
@@ -501,9 +501,6 @@ next:
                || (desc->arch.used_vectors == irq_used_vectors));
         desc->arch.used_vectors = irq_used_vectors;
 
-        if (IO_APIC_IRQ(irq))
-            irq_vector[irq] = vector;
-
         if ( desc->arch.used_vectors )
         {
             ASSERT(!test_bit(vector, desc->arch.used_vectors));
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -998,7 +998,7 @@ void __init smp_intr_init(void)
      * IRQ0 must be given a fixed assignment and initialized,
      * because it's used before the IO-APIC is set up.
      */
-    irq_vector[0] = FIRST_HIPRIORITY_VECTOR;
+    irq_to_desc(0)->arch.vector = FIRST_HIPRIORITY_VECTOR;
 
     /*
      * Also ensure serial interrupts are high priority. We do not
@@ -1008,7 +1008,6 @@ void __init smp_intr_init(void)
     {
         if ( (irq = serial_irq(seridx)) < 0 )
             continue;
-        irq_vector[irq] = FIRST_HIPRIORITY_VECTOR + seridx + 1;
         per_cpu(vector_irq, cpu)[FIRST_HIPRIORITY_VECTOR + seridx + 1] = irq;
         irq_to_desc(irq)->arch.vector = FIRST_HIPRIORITY_VECTOR + seridx + 1;
         cpumask_copy(irq_to_desc(irq)->arch.cpu_mask, &cpu_online_map);
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -14,7 +14,6 @@
 #define IO_APIC_IRQ(irq)    (platform_legacy_irq(irq) ?    \
                              (1 << (irq)) & io_apic_irqs : \
                              (irq) < nr_irqs_gsi)
-#define IO_APIC_VECTOR(irq)    (irq_vector[irq])
 
 #define MSI_IRQ(irq)       ((irq) >= nr_irqs_gsi && (irq) < nr_irqs)
 
@@ -48,8 +47,6 @@ struct arch_irq_desc {
 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
 
-extern u8 *irq_vector;
-
 extern bool_t opt_noirqbalance;
 
 #define OPT_IRQ_VECTOR_MAP_DEFAULT 0 /* Do the default thing */
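
For illustration only (not part of the patch): a minimal sketch of the
GSI branch of irq_to_vector() as it looks after this change, spelling
out the legacy caveat from the description. The helper name
gsi_vector() is made up for the sketch; irq_to_desc(),
FIRST_LEGACY_VECTOR and LAST_LEGACY_VECTOR are the Xen declarations
already used in the hunks above and are assumed to be in scope.

    /* Sketch only -- condensed from the irq_to_vector() hunk above. */
    static int gsi_vector(int irq)
    {
        int vector = irq_to_desc(irq)->arch.vector;

        /*
         * A GSI whose descriptor still carries one of the 8259's legacy
         * vectors is reported as having no vector at all.  That way
         * init_IO_APIC_traps()'s "!irq_to_vector(irq)" check keeps
         * handing such IRQs to the 8259, and __assign_irq_vector()'s
         * "old_vector > 0" path doesn't latch onto the legacy vector
         * instead of assigning a proper non-legacy one.
         */
        if (vector >= FIRST_LEGACY_VECTOR && vector <= LAST_LEGACY_VECTOR)
            vector = 0;

        return vector;
    }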