[Xen-devel] [PATCH 3/3] x86/AMD-Vi: Fix IVRS HPET special->handle override
From: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
The current logic does not handle the case when the HPET special->handle
is invalid in the IVRS table. On such a system, the following message is
shown:
(XEN) AMD-Vi: Failed to setup HPET MSI remapping: Wrong HPET
This patch allows the ivrs_hpet[<handle>]=<sbdf> command line option to
override the handle found in the IVRS table.
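For example (the handle and BDF below are made-up illustration values; the
real ones come from the Xen boot log and the platform's actual HPET
device), the override would be given on the Xen command line as:

    ivrs_hpet[0]=0000:00:14.0

With such an override present, parse_ivhd_device_special() keeps the
user-supplied id/bdf/seg and only records the IOMMU from the matching
IVHD HPET entry.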
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
xen/drivers/passthrough/amd/iommu_acpi.c | 22 +++++++++++++++-------
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/xen/drivers/passthrough/amd/iommu_acpi.c b/xen/drivers/passthrough/amd/iommu_acpi.c
index 0778af0..bfcc2eb 100644
--- a/xen/drivers/passthrough/amd/iommu_acpi.c
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
@@ -671,6 +671,7 @@ static void __init parse_ivrs_hpet(char *str)
     if ( !s || *s )
         return;
 
+    hpet_sbdf.id = id;
     hpet_sbdf.bdf = PCI_BDF(bus, dev, func);
     hpet_sbdf.seg = seg;
     hpet_sbdf.cmdline = 1;
@@ -787,19 +788,26 @@ static u16 __init parse_ivhd_device_special(
         }
         break;
     case ACPI_IVHD_HPET:
+        if ( hpet_sbdf.cmdline )
+        {
+            AMD_IOMMU_DEBUG("IVHD: Command line override present for HPET %#x "
+                            "(IVRS: %#x devID %04x:%02x:%02x.%u)\n",
+                            hpet_sbdf.id, special->handle, seg, PCI_BUS(bdf),
+                            PCI_SLOT(bdf), PCI_FUNC(bdf));
+            hpet_sbdf.iommu = iommu;
+            break;
+        }
+
         /* set device id of hpet */
-        if ( hpet_sbdf.iommu ||
-             (hpet_sbdf.cmdline && hpet_sbdf.id != special->handle) )
+        if ( hpet_sbdf.iommu )
         {
             printk(XENLOG_WARNING "Only one IVHD HPET entry is supported\n");
             break;
         }
+
         hpet_sbdf.id = special->handle;
-        if ( !hpet_sbdf.cmdline )
-        {
-            hpet_sbdf.bdf = bdf;
-            hpet_sbdf.seg = seg;
-        }
+        hpet_sbdf.bdf = bdf;
+        hpet_sbdf.seg = seg;
         hpet_sbdf.iommu = iommu;
         break;
     default:
--
1.8.1.2