From 1c63734faeda247441f37ea3efd8629501eff79d Mon Sep 17 00:00:00 2001
From: Suravee Suthikulpanit
Date: Wed, 24 Apr 2013 15:06:02 -0500
Subject: [PATCH 1/2] Fix IOAPIC interrupt routing issue introduced in the new
 IRTE indexing scheme

With the new IRTE indexing scheme introduced in the patch
http://lists.xen.org/archives/html/xen-devel/2013-04/msg02442.html
an IOAPIC routing entry (RTE) which has the "mask" bit set does not
get set up in the IOMMU interrupt remapping table. This causes the
IOMMU to block interrupts from these devices.

This patch fixes the issue by allocating an IOMMU interrupt remapping
entry regardless of the mask bit, and using the mask bit to "enable"
or "disable" the IRTE.

Signed-off-by: Suravee Suthikulpanit
---
 xen/drivers/passthrough/amd/iommu_intr.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_intr.c b/xen/drivers/passthrough/amd/iommu_intr.c
index a46064e..ed9ae79 100644
--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -75,9 +75,9 @@ static void free_intremap_entry(int seg, int bdf, int offset)
 }
 
 static void update_intremap_entry(u32* entry, u8 vector, u8 int_type,
-    u8 dest_mode, u8 dest)
+    u8 dest_mode, u8 dest, u8 mask)
 {
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
+    set_field_in_reg_u32((mask == 0)? IOMMU_CONTROL_ENABLED: IOMMU_CONTROL_DISABLED, 0,
                          INT_REMAP_ENTRY_REMAPEN_MASK,
                          INT_REMAP_ENTRY_REMAPEN_SHIFT, entry);
     set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, *entry,
@@ -156,7 +156,9 @@ static int update_intremap_entry_from_ioapic(
          * Low half of incoming RTE is already in remapped format,
          * so need to recover vector and delivery mode from IRTE.
          */
-        ASSERT(get_rte_index(rte) == offset);
+        if ((vector != 0) && (delivery_mode != 0))
+            ASSERT(get_rte_index(rte) == offset);
+
         vector = get_field_from_reg_u32(*entry,
                                         INT_REMAP_ENTRY_VECTOR_MASK,
                                         INT_REMAP_ENTRY_VECTOR_SHIFT);
@@ -164,7 +166,7 @@ static int update_intremap_entry_from_ioapic(
                                                INT_REMAP_ENTRY_INTTYPE_MASK,
                                                INT_REMAP_ENTRY_INTTYPE_SHIFT);
     }
-    update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
+    update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest, rte->mask);
 
     spin_unlock_irqrestore(lock, flags);
 
@@ -198,8 +200,6 @@ int __init amd_iommu_setup_ioapic_remapping(void)
         for ( pin = 0; pin < nr_ioapic_entries[apic]; pin++ )
         {
             rte = __ioapic_read_entry(apic, pin, 1);
-            if ( rte.mask == 1 )
-                continue;
 
             /* get device id of ioapic devices */
             bdf = ioapic_sbdf[IO_APIC_ID(apic)].bdf;
@@ -225,7 +225,7 @@ int __init amd_iommu_setup_ioapic_remapping(void)
             BUG_ON(offset >= INTREMAP_ENTRIES);
             entry = get_intremap_entry(iommu->seg, req_id, offset);
             update_intremap_entry(entry, vector,
-                                  delivery_mode, dest_mode, dest);
+                                  delivery_mode, dest_mode, dest, rte.mask);
             spin_unlock_irqrestore(lock, flags);
 
             set_rte_index(&rte, offset);
@@ -240,6 +240,7 @@ int __init amd_iommu_setup_ioapic_remapping(void)
             }
         }
     }
+
     return 0;
 }
 
@@ -397,7 +398,7 @@ static int update_intremap_entry_from_msi_msg(
     }
 
     entry = get_intremap_entry(iommu->seg, req_id, offset);
-    update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
+    update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest, 0);
     spin_unlock_irqrestore(lock, flags);
 
     *data = (msg->data & ~(INTREMAP_ENTRIES - 1)) | offset;
-- 
1.7.10.4
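
As a minimal standalone sketch of the mechanism the patch relies on (not
Xen code): the IOAPIC RTE's mask bit is folded into the IRTE's RemapEn
field, so a masked pin still gets a remapping entry written, just a
disabled one. The field positions below assume the legacy 32-bit AMD
IOMMU IRTE layout (RemapEn bit 0, IntType bits 4:2, DM bit 6, Destination
bits 15:8, Vector bits 23:16); the constants are local stand-ins, not
Xen's INT_REMAP_ENTRY_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative shifts for the low 32 bits of a legacy AMD IOMMU IRTE. */
#define IRTE_REMAPEN_SHIFT   0
#define IRTE_INTTYPE_SHIFT   2
#define IRTE_DM_SHIFT        6
#define IRTE_DEST_SHIFT      8
#define IRTE_VECTOR_SHIFT    16

static uint32_t make_irte(uint8_t vector, uint8_t int_type,
                          uint8_t dest_mode, uint8_t dest, uint8_t mask)
{
    uint32_t irte = 0;

    /* RemapEn: enabled only when the RTE is not masked. */
    irte |= (uint32_t)(mask == 0) << IRTE_REMAPEN_SHIFT;
    irte |= (uint32_t)(int_type & 0x7) << IRTE_INTTYPE_SHIFT;
    irte |= (uint32_t)(dest_mode & 0x1) << IRTE_DM_SHIFT;
    irte |= (uint32_t)dest << IRTE_DEST_SHIFT;
    irte |= (uint32_t)vector << IRTE_VECTOR_SHIFT;

    return irte;
}

int main(void)
{
    /* Masked RTE: entry is still composed, but left disabled (RemapEn=0). */
    printf("masked:   %#010x\n", make_irte(0x31, 0, 0, 1, 1));
    /* Unmasked RTE: identical entry with RemapEn=1. */
    printf("unmasked: %#010x\n", make_irte(0x31, 0, 0, 1, 0));
    return 0;
}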